Learn how to use DreamLayer AI effectively with API examples, workflow management, and advanced features based on the actual codebase.
# Basic prompts
"A beautiful sunset over mountains, digital art"
"A cute cat sitting on a windowsill, watercolor style"
"A futuristic cityscape at night, neon lights, cyberpunk"
# Advanced prompts with modifiers
"A portrait of a woman, masterpiece, best quality, highly detailed, 8k uhd, dslr, high quality, film grain, Fujifilm XT3"
"A landscape painting, oil on canvas, impressionist style, Claude Monet, soft lighting, pastel colors"
curl -X GET http://localhost:5000/api/models
Response:
{
"status": "success",
"models": [
{
"id": "sd15.safetensors",
"name": "Sd15",
"filename": "sd15.safetensors"
},
{
"id": "flux-pro",
"name": "FLUX Pro",
"filename": "flux-pro"
}
]
}
curl -X GET http://localhost:5000/api/lora-models
curl -X GET http://localhost:5000/api/upscaler-models
curl -X GET http://localhost:5000/api/controlnet/models
curl -X GET http://localhost:5000/api/fetch-prompt
Response:
{
"positive": "A majestic dragon soaring through clouds",
"negative": "blurry, low quality, distorted"
}
curl -X POST http://localhost:5000/api/upload-controlnet-image \
-F "image=@path/to/your/image.png"
Response:
{
"status": "success",
"filename": "DreamLayer_CN_00001_.png"
}
curl -X POST http://localhost:5000/api/send-to-img2img \
-H "Content-Type: application/json" \
-d '{"imagePath": "/path/to/image.png"}'
curl -X POST http://localhost:5000/api/send-to-extras \
-H "Content-Type: application/json" \
-d '{"imagePath": "/path/to/image.png"}'
curl -X POST http://localhost:5000/api/show-in-folder \
-H "Content-Type: application/json" \
-d '{"filePath": "/path/to/image.png"}'
curl -X POST http://localhost:5000/api/settings/paths \
-H "Content-Type: application/json" \
-d '{
"outputDirectory": "/path/to/output",
"modelsDirectory": "/path/to/models"
}'
DreamLayer supports multiple generation modes:
import requests
import json

# Load the txt2img workflow template shipped with DreamLayer
with open('workflows/txt2img/core_generation_workflow.json', 'r') as f:
    workflow = json.load(f)

# Modify workflow parameters (positive and negative prompt text)
workflow['prompt']['text'] = "A beautiful sunset over mountains"
workflow['negative_prompt']['text'] = "blurry, low quality"

# Send the modified workflow to the local ComfyUI server
response = requests.post('http://localhost:8188/prompt', json={
    'prompt': workflow
})
print(f"Generation started: {response.json()}")
import requests
# DALL-E 3 generation
def generate_dalle3(prompt, api_key):
    """Generate an image with OpenAI's DALL-E 3 API.

    Args:
        prompt: Text description of the desired image.
        api_key: OpenAI API key, sent as a Bearer token.

    Returns:
        The parsed JSON response from the OpenAI images endpoint.
    """
    response = requests.post(
        'https://api.openai.com/v1/images/generations',
        headers={'Authorization': f'Bearer {api_key}'},
        json={
            'model': 'dall-e-3',
            'prompt': prompt,
            'n': 1,
            'size': '1024x1024'
        }
    )
    return response.json()
# Ideogram generation
def generate_ideogram(prompt, api_key):
    """Generate an image with the Ideogram API.

    Args:
        prompt: Text description of the desired image.
        api_key: Ideogram API key, sent as a Bearer token.

    Returns:
        The parsed JSON response from the Ideogram generation endpoint.
    """
    response = requests.post(
        'https://api.ideogram.ai/api/generation',
        headers={'Authorization': f'Bearer {api_key}'},
        json={
            'prompt': prompt,
            'aspect_ratio': '1:1'
        }
    )
    return response.json()
# Load the img2img workflow template
with open('workflows/img2img/core_generation_workflow.json', 'r') as f:
    workflow = json.load(f)

# Set the source image and the transformation prompt
workflow['input_image']['image'] = "path/to/input/image.png"
workflow['prompt']['text'] = "Transform this into a painting"

# Send the workflow to the local ComfyUI server
response = requests.post('http://localhost:8188/prompt', json={
    'prompt': workflow
})
# Upload a ControlNet conditioning image via the DreamLayer Flask API.
# The POST happens inside the `with` block so the file handle stays open
# while requests streams it.
with open('control_image.png', 'rb') as f:
    files = {'image': f}
    response = requests.post('http://localhost:5000/api/upload-controlnet-image',
                             files=files)
controlnet_image = response.json()['filename']

# Reference the uploaded image in the workflow and set its influence strength
workflow['controlnet']['image'] = controlnet_image
workflow['controlnet']['strength'] = 0.8
DreamLayer includes several pre-configured workflows:
workflows/txt2img/core_generation_workflow.json - Basic text-to-image
workflows/txt2img/bfl_core_generation_workflow.json - FLUX API integration
workflows/txt2img/dalle_core_generation_workflow.json - DALL-E integration
workflows/img2img/core_generation_workflow.json - Basic image-to-image
workflows/img2img/bfl_core_generation_workflow.json - FLUX img2img

Create custom workflows by modifying the JSON files:
{
"prompt": {
"text": "Your prompt here",
"weight": 1.0
},
"negative_prompt": {
"text": "Your negative prompt here",
"weight": 1.0
},
"model": {
"name": "sd15.safetensors",
"strength": 1.0
},
"sampler": {
"name": "euler",
"steps": 20,
"cfg": 7.0
},
"output": {
"width": 512,
"height": 512,
"batch_size": 1
}
}
Set up API keys for cloud models:
# .env file
OPENAI_API_KEY=your_openai_api_key_here
IDEOGRAM_API_KEY=your_ideogram_api_key_here
BFL_API_KEY=your_bfl_api_key_here
DreamLayer/
├── dream_layer_backend/
│   ├── dream_layer.py          # Main Flask API
│   ├── txt2img_server.py       # Text-to-image server
│   ├── img2img_server.py       # Image-to-image server
│   └── controlnet.py           # ControlNet integration
├── dream_layer_frontend/       # React frontend
├── ComfyUI/                    # ComfyUI engine
├── workflows/                  # Pre-configured workflows
│   ├── txt2img/
│   └── img2img/
└── Dream_Layer_Resources/      # Output and resources
    └── output/                 # Generated images
# Clear GPU memory after generation
import torch
torch.cuda.empty_cache()
# Use CPU offloading for large models
# NOTE(review): assumes a `workflow` dict is already loaded (see the
# workflow-loading examples earlier on this page) — confirm before running.
workflow['model']['device'] = 'cpu'
# Check Flask API
curl http://localhost:5000/api/models
# Check ComfyUI
curl http://localhost:8188/system_stats
# Check frontend
curl http://localhost:8080
# Flask API logs
tail -f logs/dream_layer.log
# ComfyUI logs
tail -f ComfyUI/logs/comfyui.log
# Validate workflow before sending
def validate_workflow(workflow, required_fields=('prompt', 'model', 'sampler')):
    """Check that a workflow dict contains every required top-level field.

    Args:
        workflow: Workflow dictionary to validate.
        required_fields: Iterable of keys that must be present. Defaults to
            the core generation fields used by the bundled workflows.

    Returns:
        True if all required fields are present.

    Raises:
        ValueError: Naming the first required field that is missing.
    """
    for field in required_fields:
        if field not in workflow:
            raise ValueError(f"Missing required field: {field}")
    return True
# Test the workflow: validate locally first, then submit to ComfyUI
try:
    validate_workflow(workflow)
    response = requests.post('http://localhost:8188/prompt', json={'prompt': workflow})
    print(f"Workflow sent successfully: {response.json()}")
except Exception as e:
    # Catch-all is intentional here: this doc example reports both
    # validation failures and network errors the same way.
    print(f"Workflow validation failed: {e}")
For more advanced usage, see the API Reference and Architecture Guide.