#!/usr/bin/env python3
"""Submit Olay product placement jobs to ComfyUI."""

import json, requests, uuid, sys

# Base URL of the local ComfyUI HTTP API.
API = "http://127.0.0.1:8188"
# Product photo; referenced by name in the LoadImage nodes, so it must
# already exist in ComfyUI's input directory.
INPUT_IMAGE = "olay_product.jpg"
# One random client id per script run; sent with every queued job.
CLIENT_ID = str(uuid.uuid4())

def submit(prompt_dict, label=""):
    """POST a workflow graph to ComfyUI's /prompt endpoint.

    Args:
        prompt_dict: Node graph in ComfyUI API format (node-id -> node spec).
        label: Human-readable tag used only in console output.

    Returns:
        The server-assigned ``prompt_id`` string on success, else ``None``.
    """
    payload = {"prompt": prompt_dict, "client_id": CLIENT_ID}
    try:
        # Without a timeout this call can hang forever if ComfyUI is down.
        r = requests.post(f"{API}/prompt", json=payload, timeout=30)
    except requests.RequestException as e:
        # Report transport-level failures the same way as HTTP failures
        # instead of crashing the whole batch.
        print(f"❌ Failed [{label}]: {e}")
        return None
    if r.status_code == 200:
        pid = r.json().get("prompt_id", "?")
        print(f"✅ Queued [{label}]: {pid}")
        return pid
    print(f"❌ Failed [{label}]: {r.status_code} {r.text[:200]}")
    return None

# ── 1. IC-Light scenes (uses actual product image + IC-Light relighting) ──────

# Shared ComfyUI workflow graph (API format: node-id -> {class_type, inputs}).
# An input of the form ["<node-id>", <index>] links to that node's output.
# By node class: load the product photo (1) -> BiRefNet background removal
# with mask output (2) -> grey-matte background for IC-Light (3) -> VAE-encode
# the matted image (8) -> IC-Light conditioning (9) -> KSampler (10) ->
# VAE-decode (11).
# NOTE: node "9" links to a node "6" that is NOT in this dict — the positive
# CLIPTextEncode ("6") and SaveImage ("12") are injected per scene by the
# loop below, so the base graph is incomplete until then.
ICLIGHT_BASE = {
    "1": {"class_type": "LoadImage",             "inputs": {"image": INPUT_IMAGE}},
    "2": {"class_type": "BiRefNetRMBG",          "inputs": {"image": ["1",0], "model": "BiRefNet-general"}},
    "3": {"class_type": "ICLightApplyMaskGrey",  "inputs": {"image": ["2",0], "mask": ["2",1]}},
    "4": {"class_type": "CheckpointLoaderSimple","inputs": {"ckpt_name": "RealisticVision_V6_fp16.safetensors"}},
    "5": {"class_type": "LoadAndApplyICLightUnet","inputs": {"model_name": "iclight_sd15_fbc_unet_ldm.safetensors", "model": ["4",0]}},
    "7": {"class_type": "CLIPTextEncode",        "inputs": {"text": "blurry, low quality, watermark, deformed, dark, ugly, shadows clipping", "clip": ["4",1]}},  # negative prompt
    "8": {"class_type": "VAEEncode",             "inputs": {"pixels": ["3",0], "vae": ["4",2]}},
    "9": {"class_type": "ICLightConditioning",   "inputs": {"positive": ["6",0], "negative": ["7",0], "vae": ["4",2], "foreground": ["8",0], "multiplier": 1.0}},
    "10":{"class_type": "KSampler",              "inputs": {"model": ["5",0], "positive": ["9",0], "negative": ["9",1], "latent_image": ["9",2], "seed": 42, "steps": 25, "cfg": 2.0, "sampler_name": "dpmpp_2m", "scheduler": "karras", "denoise": 1.0}},  # fixed seed: reproducible output
    "11":{"class_type": "VAEDecode",             "inputs": {"samples": ["10",0], "vae": ["4",2]}},
}

# (positive prompt text, output filename label) — one queued job per entry.
scenes = [
    ("luxury marble bathroom countertop, soft warm studio lighting, elegant skincare display, white marble, gold accents, photorealistic", "iclight_marble"),
    ("spa wooden shelf surrounded by green plants and eucalyptus, natural light, zen aesthetic, photorealistic product photography", "iclight_spa"),
    ("modern minimalist bathroom, concrete grey surface, dramatic side lighting, editorial cosmetics photography, sharp detail", "iclight_editorial"),
    ("rustic wooden table with morning coffee and flowers, warm golden hour light streaming through window, cozy lifestyle photography", "iclight_morning"),
]

for pos_text, label in scenes:
    # Per-scene graph: the shared base plus the scene's positive prompt
    # (node "6", referenced by node "9") and its SaveImage sink ("12").
    # ICLIGHT_BASE is never mutated, so a top-level dict merge is enough —
    # no need for the json.dumps/loads deep-copy round trip.
    graph = {
        **ICLIGHT_BASE,
        "6": {"class_type": "CLIPTextEncode",
              "inputs": {"text": pos_text, "clip": ["4", 1]}},
        "12": {"class_type": "SaveImage",
               "inputs": {"images": ["11", 0], "filename_prefix": f"olay/{label}"}},
    }
    submit(graph, label)

# ── 2. Composite scenes (RMBG cutout composited onto generated background) ───

# Shared graph: generate a 1024x1024 background with the z_image_turbo model
# (nodes 10-17, 9 steps / cfg 1.0), then composite the BiRefNet product
# cutout (node 2) onto it at a fixed offset without rescaling (node 18).
# NOTE: node "16" links to a node "13" that is NOT in this dict — the
# positive CLIPTextEncode ("13") and SaveImage ("19") are injected per
# scene by the loop below, so the base graph is incomplete until then.
COMPOSITE_BASE = {
    "1": {"class_type": "LoadImage",            "inputs": {"image": INPUT_IMAGE}},
    "2": {"class_type": "BiRefNetRMBG",         "inputs": {"image": ["1",0], "model": "BiRefNet-general"}},
    "10":{"class_type": "UNETLoader",           "inputs": {"unet_name": "z_image_turbo_bf16.safetensors", "weight_dtype": "default"}},
    "11":{"class_type": "CLIPLoader",           "inputs": {"clip_name": "qwen_3_4b.safetensors", "type": "lumina2"}},
    "12":{"class_type": "VAELoader",            "inputs": {"vae_name": "z_image_turbo_ae.safetensors"}},
    "14":{"class_type": "CLIPTextEncode",       "inputs": {"text": "blurry, dark, cluttered, low quality, watermark, people, hands", "clip": ["11",0]}},  # negative prompt
    "15":{"class_type": "EmptyLatentImage",     "inputs": {"width": 1024, "height": 1024, "batch_size": 1}},
    "16":{"class_type": "KSampler",             "inputs": {"model": ["10",0], "positive": ["13",0], "negative": ["14",0], "latent_image": ["15",0], "seed": 99, "steps": 9, "cfg": 1.0, "sampler_name": "euler", "scheduler": "simple", "denoise": 1.0}},  # fixed seed: reproducible background
    "17":{"class_type": "VAEDecode",            "inputs": {"samples": ["16",0], "vae": ["12",0]}},
    # Paste cutout at (312, 312); resize_source False keeps the product at
    # its native pixel size — assumes the input photo fits the 1024 canvas
    # from that offset (TODO confirm against the actual image dimensions).
    "18":{"class_type": "ImageCompositeMasked", "inputs": {"destination": ["17",0], "source": ["2",0], "x": 312, "y": 312, "resize_source": False, "mask": ["2",1]}},
}

# (positive background prompt, output filename label) — one job per entry.
# Prompts ask for an empty surface ("no product") since the real product
# cutout is composited on afterwards.
bg_scenes = [
    ("luxury lifestyle background, white marble countertop, soft morning light, minimalist interior, warm tones, shallow depth of field, no product, empty surface, editorial photography", "composite_marble"),
    ("outdoor garden terrace, stone surface, lush greenery blurred in background, natural daylight, fresh and clean aesthetic, no product", "composite_garden"),
    ("dark moody luxury, black granite surface, dramatic rim lighting, dark background with subtle bokeh, premium brand aesthetic, no product", "composite_moody"),
]

for pos_text, label in bg_scenes:
    # Per-scene graph: shared base plus the background prompt (node "13",
    # referenced by node "16") and its SaveImage sink ("19").
    # COMPOSITE_BASE is never mutated, so a top-level dict merge is enough —
    # no need for the json.dumps/loads deep-copy round trip.
    graph = {
        **COMPOSITE_BASE,
        "13": {"class_type": "CLIPTextEncode",
               "inputs": {"text": pos_text, "clip": ["11", 0]}},
        "19": {"class_type": "SaveImage",
               "inputs": {"images": ["18", 0], "filename_prefix": f"olay/{label}"}},
    }
    submit(graph, label)

print("\nAll jobs queued. View at http://fred:8188")
