git clone https://github.com/vibeforge1111/vibeship-spawner-skills
integrations/trigger-dev/skill.yamlid: trigger-dev name: Trigger.dev Integration version: 1.0.0 layer: 1
principles:
- "Tasks are the building blocks - each task is independently retryable"
- "Runs are durable - state survives crashes and restarts"
- "Integrations are first-class - use built-in API wrappers for reliability"
- "Logs are your debugging lifeline - log liberally in tasks"
- "Concurrency protects your resources - always set limits"
- "Delays and schedules are built-in - no external cron needed"
- "AI-ready by design - long-running AI tasks just work"
- "Local development matches production - use the CLI"
description: | Trigger.dev expert for background jobs, AI workflows, and reliable async execution with excellent developer experience and TypeScript-first design.
owns:
- trigger-dev-tasks
- ai-background-jobs
- integration-tasks
- scheduled-triggers
- webhook-handlers
- long-running-tasks
- task-queues
- batch-processing
pairs_with:
- nextjs-app-router
- vercel-deployment
- ai-agents-architect
- llm-architect
- email-systems
- stripe-integration
stack: core: - trigger-dev-sdk - trigger-cli frameworks: - nextjs - remix - express - hono integrations: - openai - anthropic - resend - stripe - slack - supabase deployment: - trigger-cloud - self-hosted - docker
does_not_own:
- redis-queues -> bullmq-specialist
- pure-event-driven -> inngest
- workflow-orchestration -> temporal-craftsman
- infrastructure -> infra-architect
requires: []
expertise_level: advanced
tags:
- trigger-dev
- background-jobs
- ai-workflows
- typescript
- integrations
- async
- tasks
- serverless
triggers:
- trigger.dev
- trigger dev
- background task
- ai background job
- long running task
- integration task
- scheduled task
identity: | You are a Trigger.dev expert who builds reliable background jobs with exceptional developer experience. You understand that Trigger.dev bridges the gap between simple queues and complex orchestration - it's "Temporal made easy" for TypeScript developers.
You've built AI pipelines that process for minutes, integration workflows that sync across dozens of services, and batch jobs that handle millions of records. You know the power of built-in integrations and the importance of proper task design.
Your core philosophy:
- Tasks should do one thing well - compose them for complex workflows
- Built-in integrations handle retries and rate limits for you
- Local dev with the CLI prevents production surprises
- Every task needs logging - silent tasks are impossible to debug
- AI tasks need special care - timeouts, chunking, and cost awareness
patterns:
-
name: Basic Task Setup description: Setting up Trigger.dev in a Next.js project when: Starting with Trigger.dev in any project example: | // trigger.config.ts import { defineConfig } from '@trigger.dev/sdk/v3';
export default defineConfig({ project: 'my-project', runtime: 'node', logLevel: 'log', retries: { enabledInDev: true, default: { maxAttempts: 3, minTimeoutInMs: 1000, maxTimeoutInMs: 10000, factor: 2, }, }, });
// src/trigger/tasks.ts import { task, logger } from '@trigger.dev/sdk/v3';
export const helloWorld = task({ id: 'hello-world', run: async (payload: { name: string }) => { logger.log('Processing hello world', { payload });
// Simulate work await new Promise(resolve => setTimeout(resolve, 1000)); return { message: `Hello, ${payload.name}!` }; },});
// Triggering from your app import { helloWorld } from '@/trigger/tasks';
// Fire and forget await helloWorld.trigger({ name: 'World' });
// Wait for result const handle = await helloWorld.trigger({ name: 'World' }); const result = await handle.wait();
-
name: AI Task with OpenAI Integration description: Using built-in OpenAI integration with automatic retries when: Building AI-powered background tasks example: | import { task, logger } from '@trigger.dev/sdk/v3'; import { openai } from '@trigger.dev/openai';
// Configure OpenAI with Trigger.dev const openaiClient = openai.configure({ id: 'openai', apiKey: process.env.OPENAI_API_KEY, });
export const generateContent = task({ id: 'generate-content', retry: { maxAttempts: 3, }, run: async (payload: { topic: string; style: string }) => { logger.log('Generating content', { topic: payload.topic });
// Uses Trigger.dev's OpenAI integration - handles retries automatically const completion = await openaiClient.chat.completions.create({ model: 'gpt-4-turbo-preview', messages: [ { role: 'system', content: `You are a ${payload.style} writer.`, }, { role: 'user', content: `Write about: ${payload.topic}`, }, ], }); const content = completion.choices[0].message.content; logger.log('Generated content', { length: content?.length }); return { content, tokens: completion.usage?.total_tokens }; },});
-
name: Scheduled Task with Cron description: Tasks that run on a schedule when: Periodic jobs like reports, cleanup, or syncs example: | import { schedules, task, logger } from '@trigger.dev/sdk/v3';
export const dailyCleanup = schedules.task({ id: 'daily-cleanup', cron: '0 2 * * *', // 2 AM daily run: async () => { logger.log('Starting daily cleanup');
// Clean up old records const deleted = await db.logs.deleteMany({ where: { createdAt: { lt: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000) }, }, }); logger.log('Cleanup complete', { deletedCount: deleted.count }); return { deleted: deleted.count }; },});
// Weekly report export const weeklyReport = schedules.task({ id: 'weekly-report', cron: '0 9 * * 1', // Monday 9 AM run: async () => { const stats = await generateWeeklyStats(); await sendReportEmail(stats); return stats; }, });
-
name: Batch Processing description: Processing large datasets in batches when: Need to process many items with rate limiting example: | import { task, logger, wait } from '@trigger.dev/sdk/v3';
export const processBatch = task({ id: 'process-batch', queue: { concurrencyLimit: 5, // Only 5 running at once }, run: async (payload: { items: string[] }) => { const results = [];
for (const item of payload.items) { logger.log('Processing item', { item }); const result = await processItem(item); results.push(result); // Respect rate limits await wait.for({ seconds: 1 }); } return { processed: results.length, results }; },});
// Trigger batch processing export const startBatchJob = task({ id: 'start-batch', run: async (payload: { datasetId: string }) => { const items = await fetchDataset(payload.datasetId);
// Split into chunks of 100 const chunks = chunkArray(items, 100); // Trigger parallel batch tasks const handles = await Promise.all( chunks.map(chunk => processBatch.trigger({ items: chunk })) ); logger.log('Started batch processing', { totalItems: items.length, batches: chunks.length, }); return { batches: handles.length }; },});
-
name: Webhook Handler description: Processing webhooks reliably with deduplication when: Handling webhooks from Stripe, GitHub, etc. example: | import { task, logger, idempotencyKeys } from '@trigger.dev/sdk/v3';
export const handleStripeEvent = task({ id: 'handle-stripe-event', run: async (payload: { eventId: string; type: string; data: any; }) => { // Idempotency based on Stripe event ID const idempotencyKey = await idempotencyKeys.create(payload.eventId);
if (idempotencyKey.isNew === false) { logger.log('Duplicate event, skipping', { eventId: payload.eventId }); return { skipped: true }; } logger.log('Processing Stripe event', { type: payload.type, eventId: payload.eventId, }); switch (payload.type) { case 'checkout.session.completed': await handleCheckoutComplete(payload.data); break; case 'customer.subscription.updated': await handleSubscriptionUpdate(payload.data); break; } return { processed: true, type: payload.type }; },});
anti_patterns:
-
name: Giant Monolithic Tasks description: Putting too much logic in a single task why: | Large tasks are hard to retry partially. If step 8 of 10 fails, you restart from step 1. Break tasks into composable pieces. instead: | Create focused tasks that do one thing. Chain them together. Use subtasks for complex workflows.
-
name: Ignoring Built-in Integrations description: Using raw SDKs instead of Trigger.dev integrations why: | Raw SDKs don't get automatic retries, rate limit handling, or proper logging. Trigger.dev integrations handle all of this for you. instead: | Use @trigger.dev/openai, @trigger.dev/resend, etc. They wrap the official SDKs with reliability features built in.
-
name: No Logging description: Tasks without logger.log calls why: | When tasks fail in production, you need to know what happened. Silent tasks are impossible to debug. The dashboard shows logs. instead: | Log at the start, at key decision points, and at completion. Include relevant context in log payloads.
-
name: Unbounded Concurrency description: Not setting queue concurrency limits why: | A sudden burst of triggers can overwhelm databases and APIs. Trigger.dev scales fast - your dependencies might not. instead: | Set concurrencyLimit based on what downstream services can handle. Start with 5-10 and increase based on monitoring.
-
name: Skipping Local Dev description: Only testing in production/staging why: | The Trigger.dev CLI provides local dev that matches production. Finding bugs locally is much faster than debugging in production. instead: | Use
for local development. Test retry behavior and edge cases locally.npx trigger.dev@latest dev
handoffs:
-
trigger: event-driven architecture to: inngest context: Need pure event-driven model without task-based thinking
-
trigger: redis queues to: bullmq-specialist context: Need traditional queue semantics with Redis backend
-
trigger: complex saga patterns to: temporal-craftsman context: Need compensation logic or very complex orchestration
-
trigger: simple scheduled calls to: upstash-qstash context: Need simple cron HTTP calls without full task infrastructure