#!/usr/bin/env python3
"""HunyuanVideo I2V - Attempt 15: Native Sampler + Wrapper Loader"""

import json
import urllib.request
import urllib.error
import uuid

# Base URL of the ComfyUI HTTP API. Assumes a locally running instance on the
# default port 8188 — TODO confirm if targeting a remote/containerized server.
COMFY_URL = "http://localhost:8188"
# Source-image filename passed to the LoadImage node; presumably it must already
# exist in ComfyUI's input directory — verify before running.
INPUT_IMAGE = "pool_party.png"

# Positive text prompt for the image-to-video generation (fed to node "6").
PROMPT = """Two men stand beside a swimming pool at sunset. The man on the left gently places his two hands on the other man's waist. Very slowly, they both rotate their bodies to face each other. Their arms remain attached to their shoulders. They embrace naturally and lean in for a tender, romantic kiss. Only four hands total are visible in the scene, resting on each other's bodies. Smooth, cinematic motion. Photorealistic quality. No extra limbs, no hands appearing from the edges. Static camera shot. The camera remains perfectly still. No zooming, no panning."""

# ComfyUI API-format workflow graph: node-id -> {"class_type", "inputs"}.
# Node connections are encoded as ["source_node_id", output_index] pairs.
# Node class names suggest the Kijai HunyuanVideo wrapper custom nodes —
# NOTE(review): confirm those nodes are installed on the target server.
workflow = {
    # 1: load the conditioning still image from the ComfyUI input folder.
    "1": {"class_type": "LoadImage", "inputs": {"image": INPUT_IMAGE, "upload": "image"}},
    
    # 2: load the HunyuanVideo I2V diffusion model (fp8-quantized checkpoint).
    "2": {"class_type": "HyVideoModelLoader", "inputs": {
        "model": "hunyuan/hunyuan_video_I2V_720_fixed_fp8_e4m3fn.safetensors",
        "base_precision": "bf16",
        "quantization": "fp8_e4m3fn",
        "load_device": "offload_device"
    }},
    
    # 3: load the matching video VAE used by both encode (5) and decode (8).
    "3": {"class_type": "HyVideoVAELoader", "inputs": {
        "model_name": "hunyuan_video_vae_bf16.safetensors",
        "precision": "bf16"
    }},
    
    # 4: download/load the LLaVA-LLaMA text encoder plus CLIP for prompt embedding.
    "4": {"class_type": "DownloadAndLoadHyVideoTextEncoder", "inputs": {
        "llm_model": "Kijai/llava-llama-3-8b-text-encoder-tokenizer",
        "clip_model": "openai/clip-vit-large-patch14",
        "precision": "bf16",
        "quantization": "fp8_e4m3fn",
        "load_device": "offload_device"
    }},
    
    # 5: VAE-encode the input image (node 1) into latents used as the sampler's
    # image conditioning; tiling options presumably trade VRAM for speed — confirm.
    "5": {"class_type": "HyVideoEncode", "inputs": {
        "vae": ["3", 0],
        "image": ["1", 0],
        "enable_vae_tiling": True,
        "temporal_tiling_sample_size": 64,
        "spatial_tile_sample_min_size": 256,
        "auto_tile_size": True
    }},
    
    # 6: encode PROMPT (and the image, for I2V) into embeddings for the sampler.
    "6": {"class_type": "HyVideoI2VEncode", "inputs": {
        "text_encoders": ["4", 0],
        "prompt": PROMPT,
        "force_offload": True,
        "image": ["1", 0],
        "image_embed_interleave": 4
    }},
    
    # 7: run the diffusion sampler — 81 frames at 1280x720, 30 steps, fixed seed
    # for reproducibility, conditioned on text embeds (6) and image latents (5).
    "7": {"class_type": "HyVideoSampler", "inputs": {
        "model": ["2", 0],
        "hyvid_embeds": ["6", 0],
        "width": 1280,
        "height": 720,
        "num_frames": 81,
        "steps": 30,
        "embedded_guidance_scale": 6.0,
        "flow_shift": 9.0,
        "seed": 42,
        "force_offload": True,
        "scheduler": "FlowMatchDiscreteScheduler",
        "image_cond_latents": ["5", 0]
    }},
    
    # 8: VAE-decode the sampled latents (7) back into an image sequence.
    "8": {"class_type": "HyVideoDecode", "inputs": {
        "vae": ["3", 0],
        "samples": ["7", 0],
        "enable_vae_tiling": True,
        "auto_tile_size": True,
        "temporal_tiling_sample_size": 64,
        "spatial_tile_sample_min_size": 256
    }},
    
    # 9: combine decoded frames (8) into an H.264 MP4 at 24 fps and save it
    # (crf 16 = high quality; pingpong/loop disabled).
    "9": {"class_type": "VHS_VideoCombine", "inputs": {
        "images": ["8", 0],
        "frame_rate": 24,
        "loop_count": 0,
        "filename_prefix": "Hunyuan_Attempt15_Fixed720p",
        "format": "video/h264-mp4",
        "pix_fmt": "yuv420p",
        "crf": 16,
        "save_metadata": True,
        "pingpong": False,
        "save_output": True
    }}
}

def submit():
    """POST the module-level ``workflow`` to the ComfyUI ``/prompt`` endpoint.

    Prints the server-assigned prompt id on success; on failure prints a
    truncated error body (HTTP error) or the connection failure reason.
    Returns None in all cases.
    """
    # NOTE: the previous version generated an unused uuid4 here (dead code);
    # the server assigns its own prompt_id, returned in the response.
    data = {"prompt": workflow, "client_id": "selena"}
    req = urllib.request.Request(
        f"{COMFY_URL}/prompt",
        data=json.dumps(data).encode('utf-8'),
        headers={'Content-Type': 'application/json'}
    )
    try:
        with urllib.request.urlopen(req) as response:
            result = json.load(response)
            print(f"✅ Submitted! ID: {result.get('prompt_id')}")
    except urllib.error.HTTPError as e:
        # Server rejected the workflow (bad node/input): show the error body.
        print(f"❌ Error: {e.read().decode('utf-8')[:1500]}")
    except urllib.error.URLError as e:
        # Server unreachable (e.g. ComfyUI not running) — previously this
        # escaped as an unhandled traceback while HTTPError was handled.
        print(f"❌ Connection error: {e.reason}")

if __name__ == "__main__":
    # Script entry point: submit the workflow once and exit.
    submit()
