Text to Image Generations

import os

import requests

# Endpoint for text-to-image generation.
# NOTE: the angle brackets were a markdown-autolink artifact; they must not
# be part of the URL string or the request hits a malformed host.
url = "https://doc-ai.si.online/v1/images/text2image"

# Generation parameters; seed is fixed for reproducible output.
payload = {
    "model": "stabilityai/stable-diffusion-3-5-large",
    "prompt": "sky, blurry, blue sky, flower, day, depth of field",
    "negative_prompt": "",
    "image_size": "1024x1024",
    "batch_size": 1,
    "seed": 535342,
    "num_inference_steps": 20,
    "guidance_scale": 7.5,
}
# API key is read from the environment — never hard-code credentials.
headers = {
    "Authorization": f"Bearer {os.getenv('SI_API_KEY')}",
    "Content-Type": "application/json",
}

response = requests.post(url, json=payload, headers=headers)

# Response objects expose the body as `.text` (lowercase); `.Text` raises
# AttributeError.
print(response.text)

Via OpenAI API

import os

from openai import OpenAI

# base_url must be a bare URL — the markdown angle brackets would make the
# client target an invalid host.
client = OpenAI(api_key=os.getenv("SI_API_KEY"), base_url="https://doc-ai.si.online/v1")
# Generate one image via the OpenAI-compatible Images API.
response = client.images.generate(
    model="stabilityai/stable-diffusion-3-5-large",
    prompt="sky, blurry, blue sky, flower, day, depth of field",
    size="512x512",
)

# Dump the full pydantic response model as a plain dict.
print(response.model_dump())

Image to Image Generations

import base64
import os
from io import BytesIO

import requests
from PIL import Image

def encode_image(image_input: str) -> str:
    """Encode an image file as a base64 WebP data URI.

    Args:
        image_input: Path to the image file to encode.

    Returns:
        A ``data:image/webp;base64,...`` data-URI string suitable for
        embedding in a JSON request body.
    """
    # Context manager ensures the underlying file handle is closed;
    # Image.open is lazy and otherwise keeps the file open.
    with Image.open(image_input) as img:
        output_buffer = BytesIO()
        img.save(output_buffer, format="WEBP")
    webp_data = output_buffer.getvalue()
    base64_webp = base64.b64encode(webp_data).decode("utf-8")
    return f"data:image/webp;base64,{base64_webp}"

# Endpoint for image-to-image generation; angle brackets (a markdown-autolink
# artifact) removed so the URL string is valid.
url = "https://doc-ai.si.online/v1/images/image2image"

# Source image is sent inline as a base64 WebP data URI.
payload = {
    "model": "stabilityai/stable-diffusion-xl-base-1.0",
    "prompt": "sky, blurry, blue sky, flower, day, depth of field",
    "image_size": "1024x1024",
    "image": encode_image("images.jpg"),
}
# API key is read from the environment — never hard-code credentials.
headers = {
    "Authorization": f"Bearer {os.getenv('SI_API_KEY')}",
    "Content-Type": "application/json",
}

response = requests.post(url, json=payload, headers=headers)

# `.text` (lowercase, no space) is the correct attribute; `response. Text`
# raises AttributeError.
print(response.text)