
StabilityAI

stability.ai

Generate text, images and audio with Stability AI.

Using the StabilityAI API with Trigger.dev

You can use Trigger.dev with any existing Node.js SDK, or simply with fetch. Wrapping your calls in io.runTask makes your Stability AI background job resumable and shows each step in our dashboard.

Use io.runTask() and the official SDK or fetch.
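
For example, here is a minimal sketch of that pattern inside a job's run function. It reuses the apiHost and apiKey constants defined in the full example below and calls Stability AI's engines list endpoint purely for illustration; wrapping the request in io.runTask caches its result so the job can resume without repeating the call.

// Minimal sketch: wrap any Stability AI request in io.runTask inside a job's run function.
// "list-engines" is an arbitrary cache key; apiHost and apiKey are defined as in the example below.
const engines = await io.runTask("list-engines", async () => {
  const res = await fetch(`${apiHost}/v1/engines/list`, {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  return await res.json();
});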

Example code using StabilityAI

Below is a working code example showing how to use Stability AI with Trigger.dev. These samples are open source and maintained by the community; you can copy and paste them into your own projects.

import { TriggerClient, eventTrigger } from "@trigger.dev/sdk";
import { z } from "zod";

// This code uses the REST API for Stability AI.
// Documentation can be found at: https://platform.stability.ai/docs/api-reference
const engineId = "stable-diffusion-xl-1024-v1-0";
const apiHost = process.env.API_HOST ?? "https://api.stability.ai";
const apiKey = process.env.STABILITY_API_KEY;

// Initialize the Trigger.dev client (the id here is a placeholder; use your own project id).
const client = new TriggerClient({ id: "api-reference" });

interface GenerationResponse {
  artifacts: Array<{
    base64: string;
    seed: number;
    finishReason: string;
  }>;
}

client.defineJob({
  id: "stability-ai-text-to-image",
  name: "Stability AI Text to Image",
  version: "1.0.0",
  trigger: eventTrigger({
    name: "stability.text.to.image",
    // Define the schema for text prompts used for image generation.
    // Weights can be positive or negative to influence the generation.
    //
    // text_prompts: [
    //   {
    //     "text": "A painting of a cat",
    //     "weight": 1
    //   },
    //   {
    //     "text": "blurry, bad",
    //     "weight": -1
    //   }
    // ]
    //
    // For more details, refer to the text-to-image endpoint documentation:
    // https://platform.stability.ai/docs/api-reference#tag/v1generation/operation/textToImage
    schema: z.object({
      text_prompts: z.array(
        z.object({
          text: z.string(),
          weight: z.number().optional(),
        })
      ),
      cfg_scale: z.number().optional(),
      width: z.number().optional(),
      height: z.number().optional(),
      steps: z.number().optional(),
      samples: z.number().optional(),
    }),
  }),
  run: async (payload, io, ctx) => {
    // Here we use `backgroundFetch`, which allows you to fetch data from
    // a URL that can take longer than the serverless timeout.
    const response = (await io.backgroundFetch(
      "create-image-from-text",
      `${apiHost}/v1/generation/${engineId}/text-to-image`,
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Accept: "application/json",
          Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
          text_prompts: payload.text_prompts,
          cfg_scale: payload.cfg_scale ?? 7,
          height: payload.height ?? 1024,
          width: payload.width ?? 1024,
          steps: payload.steps ?? 50,
          samples: payload.samples ?? 1,
        }),
      },
      // Retry config: when Stability AI responds with HTTP 429 (rate limited),
      // retry with exponential backoff.
      {
        "429": {
          strategy: "backoff",
          limit: 10,
          minTimeoutInMs: 1000,
          maxTimeoutInMs: 60000,
          factor: 2,
          randomize: true,
        },
      }
    )) as GenerationResponse;

    // Do something with the returned image(s).
    // Learn about using cache keys with loops here:
    // https://trigger.dev/docs/documentation/concepts/resumability#how-to-use-cache-keys-with-loops
    for (const [index, artifact] of response.artifacts.entries()) {
      await io.runTask(
        `Image ${index + 1}/${payload.samples ?? 1}`,
        async () => {
          const imageUrl = `data:image/png;base64,${artifact.base64}`;
          return imageUrl;
        }
      );
    }

    return response;
  },
});
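
To run the job, send the event it subscribes to. Below is a sketch of triggering it with client.sendEvent; the prompt text, weights, and sample count are placeholders.

// Trigger the job from your app (prompt values are placeholders).
await client.sendEvent({
  name: "stability.text.to.image",
  payload: {
    text_prompts: [
      { text: "A painting of a cat", weight: 1 },
      { text: "blurry, bad", weight: -1 },
    ],
    samples: 1,
  },
});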