"""
Abstracted image generation utilities for the Lumabit pipeline.
Provides reusable image generation functionality across different pipeline types.
"""
import os
import io
import requests
import base64
import datetime
import shutil
import tempfile
from typing import Dict, Any, Optional, Tuple
from slugify import slugify
from openai import OpenAI
from PIL import Image, ImageOps
from google import genai
from google.genai import types

from utils.io import ensure_dir
from utils.cost_tracker import track_image_cost
from utils.storage import (
    use_local_storage,
    path_exists,
    write_json,
    write_bytes,
    read_json,
    write_text,
    copy_path,
    remove_path,
)


def generate_image_with_openai(
    description: str,
    context: str = "",
    model: str = "gpt-image-1",
    size: str = "1024x1024",
    quality: str = "standard",
    style: str = "natural",
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Generate an image using OpenAI's image generation API.

    Args:
        description: Description of the image to generate
        context: Additional context to inform image generation
        model: OpenAI image model to use
        size: Image size (1024x1024, 1024x1792, 1792x1024)
        quality: Image quality (standard, hd)
        style: Image style (natural, vivid)
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with image generation result: raw bytes under "image_data"
        plus echo-back metadata on success, or "success": False and "error".
    """
    # Compose the prompt from the description plus optional context, then cap
    # its length for the API.
    prompt = f"{description}\nContext: {context}" if context else description
    prompt = prompt[:4000]

    try:
        client = OpenAI()
        # Send only model/prompt/n/size. Some server versions reject extra
        # params like `quality`, `style`, or `response_format`, so those are
        # kept as local metadata only.
        response = client.images.generate(
            model=model,
            prompt=prompt,
            n=1,
            size=size
        )

        if not response.data:
            raise ValueError("No image data returned from image generation API")

        image_info = response.data[0]

        if image_info.url:
            # Download the generated image from its hosted URL.
            download = requests.get(image_info.url, timeout=30)
            download.raise_for_status()
            image_data = download.content
        elif image_info.b64_json:
            # No URL available; fall back to the inline base64 payload.
            image_data = base64.b64decode(image_info.b64_json)
        else:
            raise ValueError("No image URL or base64 data returned from image generation API")

        # Rough token estimate (~4 chars per token) for cost accounting.
        estimated_tokens = max(1, len(prompt) // 4)
        cost_result = track_image_cost(
            model_name=model,
            image_count=1,
            image_size=size,
            quality=quality,
            prompt_tokens=estimated_tokens,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )

        return {
            "success": True,
            "image_data": image_data,
            "model": model,
            "size": size,
            "quality": quality,
            "style": style,
            "prompt": prompt,
            "revised_prompt": getattr(image_info, 'revised_prompt', None),
            "cost_tracking": {
                "cost": cost_result.total_cost,
                "cost_details": str(cost_result)
            }
        }

    except Exception as e:
        print(f"Error generating image: {e}")
        return {
            "success": False,
            "error": str(e),
            "model": model,
            "size": size,
            "prompt": prompt
        }


def generate_image_with_google(
    description: str,
    context: str = "",
    model: str = "gemini-2.5-flash-image",
    size: str = "1024x1024",
    quality: str = "standard",
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Generate an image using Google's Gemini image generation API.

    Args:
        description: Description of the image to generate
        context: Additional context to inform image generation
        model: Google image model to use
        size: Logical size label for downstream cost tracking
        quality: Logical quality label for downstream cost tracking
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with image generation result: raw image bytes under
        "image_data" plus cost tracking on success, otherwise
        "success": False with an "error" message.
    """
    # Compose the prompt from the description plus optional context.
    prompt = description
    if context:
        prompt = f"{description}\nContext: {context}"

    # Cap prompt length before sending to the API.
    if len(prompt) > 4000:
        prompt = prompt[:4000]

    # Fail fast with a structured error (rather than raising) when no key is set.
    api_key = os.getenv("GOOGLE_API_KEY")
    if not api_key:
        return {
            "success": False,
            "error": "GOOGLE_API_KEY is not set",
            "model": model,
            "size": size,
            "prompt": prompt
        }

    try:
        client = genai.Client(api_key=api_key)
        # Gemini 3 models accept an explicit ImageConfig; older models reject it.
        is_gemini3 = model.startswith("gemini-3")

        if is_gemini3:
            # Map the pipeline's pixel-size labels onto aspect ratios.
            aspect_ratio_map = {
                "1024x1024": "1:1",
                "1024x1792": "9:16",
                "1792x1024": "16:9"
            }

            image_config_kwargs = {}
            if size in aspect_ratio_map:
                image_config_kwargs["aspect_ratio"] = aspect_ratio_map[size]
                image_config_kwargs["image_size"] = size
            else:
                # Unknown label: pass it through as-is and let the API decide.
                # NOTE(review): assumes the API accepts "WxH" strings for
                # image_size — confirm against the google-genai SDK version in use.
                image_config_kwargs["image_size"] = size

            image_config = types.ImageConfig(**image_config_kwargs)
            config = types.GenerateContentConfig(image_config=image_config)

            response = client.models.generate_content(
                model=model,
                contents=prompt,
                config=config
            )
        else:
            # Gemini 2.5 flash image: call without image_config to avoid INVALID_ARGUMENT.
            response = client.models.generate_content(
                model=model,
                contents=prompt
            )

        # Prefer top-level parts; fall back to first candidate content parts.
        parts = getattr(response, "parts", None)
        if not parts and getattr(response, "candidates", None):
            try:
                parts = response.candidates[0].content.parts
            except Exception:
                parts = None

        if not parts:
            raise ValueError("No image parts returned from Google image generation API")

        # Scan parts for the first one carrying usable image data. Different
        # SDK versions surface the payload differently (bytes, base64 string,
        # or only via as_image()), so try each shape in turn.
        image_data = None
        for idx, part in enumerate(parts):
            inline = getattr(part, "inline_data", None)
            if inline and getattr(inline, "data", None):
                raw_data = inline.data
                if isinstance(raw_data, bytes):
                    # Newer SDKs hand back raw bytes directly.
                    image_data = raw_data
                elif isinstance(raw_data, str):
                    # Some versions return base64-encoded text.
                    image_data = base64.b64decode(raw_data)
                else:
                    # Unknown payload type: re-render via PIL if the part supports it.
                    if hasattr(part, "as_image"):
                        buf = io.BytesIO()
                        part.as_image().save(buf, format="PNG")
                        image_data = buf.getvalue()
                if image_data:
                    break

            # Fallback: some SDK versions expose as_image without inline_data
            if image_data is None and hasattr(part, "as_image"):
                try:
                    buf = io.BytesIO()
                    part.as_image().save(buf, format="PNG")
                    image_data = buf.getvalue()
                    if image_data:
                        break
                except Exception as img_exc:
                    print(f"⚠️ Failed to render part {idx} via as_image fallback: {img_exc}")

            # Debug: show available fields when no data is present.
            if image_data is None:
                part_attrs = [a for a in dir(part) if not a.startswith("_")]
                print(f"⚠️ Google image part {idx} missing inline_data; attrs: {part_attrs}")

        if not image_data:
            # Additional debug context to help diagnose API payload shape
            candidate_count = len(getattr(response, "candidates", []) or [])
            print(f"⚠️ Google image generation returned no inline_data. parts={len(parts)}, candidates={candidate_count}, model={model}")
            print(f"Response repr: {repr(response)}")
            raise ValueError("No usable image data returned from Google image generation API")

        # Rough token estimate (~4 chars per token) for cost accounting.
        prompt_tokens = max(1, len(prompt) // 4)
        cost_result = track_image_cost(
            model_name=model,
            image_count=1,
            image_size=size,
            quality=quality,
            prompt_tokens=prompt_tokens,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )

        # "style" and "revised_prompt" are always None here: kept for shape
        # parity with generate_image_with_openai's return value.
        return {
            "success": True,
            "image_data": image_data,
            "model": model,
            "size": size,
            "quality": quality,
            "style": None,
            "prompt": prompt,
            "revised_prompt": None,
            "cost_tracking": {
                "cost": cost_result.total_cost,
                "cost_details": str(cost_result)
            }
        }

    except Exception as e:
        print(f"Error generating image with Google: {e}")
        return {
            "success": False,
            "error": str(e),
            "model": model,
            "size": size,
            "prompt": prompt
        }


def edit_image_with_openai(
    image_bytes: bytes,
    prompt: str,
    context: str = "",
    model: str = "gpt-image-1",
    size: str = "1024x1024",
    quality: str = "standard",
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Apply an image-to-image edit using OpenAI's image API.

    Args:
        image_bytes: Source image as raw bytes (any Pillow-readable format)
        prompt: Edit instruction
        context: Optional extra context appended to the prompt
        model: OpenAI image model to use
        size: Output image size label
        quality: Quality label recorded for cost tracking
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with the edit result: edited bytes under "image_data" on
        success, otherwise "success": False with an "error" message.
    """
    composed_prompt = f"{prompt}\nContext: {context}" if context else prompt
    # Keep within the API prompt-length limit.
    composed_prompt = composed_prompt[:4000]

    try:
        client = OpenAI()

        # Re-encode the source as PNG so the upload mimetype is unambiguous.
        with Image.open(io.BytesIO(image_bytes)) as source_img:
            png_stream = io.BytesIO()
            source_img.save(png_stream, format="PNG")
            png_payload = png_stream.getvalue()

        # The SDK derives the mimetype from the file name, so stage the PNG in
        # a real *.png temp file and hand the API a handle to it.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
            tmp_file.write(png_payload)
            tmp_file.flush()
            tmp_name = tmp_file.name

        try:
            with open(tmp_name, "rb") as handle:
                response = client.images.edit(
                    model=model,
                    image=handle,
                    prompt=composed_prompt,
                    size=size
                )
        finally:
            # Best-effort cleanup of the staging file.
            try:
                os.remove(tmp_name)
            except OSError:
                pass

        if not response.data:
            raise ValueError("No image data returned from image edit API")

        image_info = response.data[0]

        if image_info.url:
            # Fetch the edited image from its hosted URL.
            fetched = requests.get(image_info.url, timeout=30)
            fetched.raise_for_status()
            edited_bytes = fetched.content
        elif getattr(image_info, "b64_json", None):
            # No URL available; decode the inline base64 payload instead.
            edited_bytes = base64.b64decode(image_info.b64_json)
        else:
            raise ValueError("No image URL or base64 data returned from edit API")

        # Rough token estimate (~4 chars per token) for cost accounting.
        estimated_tokens = max(1, len(composed_prompt) // 4)
        cost_result = track_image_cost(
            model_name=model,
            image_count=1,
            image_size=size,
            quality=quality,
            prompt_tokens=estimated_tokens,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )

        return {
            "success": True,
            "image_data": edited_bytes,
            "model": model,
            "size": size,
            "quality": quality,
            "prompt": composed_prompt,
            "revised_prompt": getattr(image_info, "revised_prompt", None),
            "cost_tracking": {
                "cost": cost_result.total_cost,
                "cost_details": str(cost_result)
            }
        }

    except Exception as e:
        print(f"Error editing image with OpenAI: {e}")
        return {
            "success": False,
            "error": str(e),
            "model": model,
            "size": size,
            "prompt": composed_prompt
        }


def save_generated_image(
    image_data: bytes,
    filename: str,
    output_dir: str,
    metadata: Optional[Dict[str, Any]] = None,
    max_width: Optional[int] = None,
    force_webp: bool = False,
    thumbnail_size: Optional[int] = 256
) -> Dict[str, Any]:
    """
    Save generated image data to file with optional metadata.

    Args:
        image_data: Binary image data
        filename: Filename for the image (should include extension)
        output_dir: Directory to save the image
        metadata: Optional metadata to save alongside the image as
            "<name>_metadata.json"
        max_width: If set, downscale wider images to this width (aspect preserved)
        force_webp: If True, re-encode the primary asset as WEBP regardless of
            the original extension
        thumbnail_size: Square thumbnail edge in pixels ("-sm.webp" variant);
            None disables thumbnail creation

    Returns:
        Dictionary with save result information ("success", "image_path",
        "metadata_path", "filename", "size_bytes", thumbnail fields, "format"),
        or {"success": False, "error": ...} on failure.
    """
    try:
        # Ensure output directory exists
        ensure_dir(output_dir)

        # Determine final filename/format
        base_filename, ext = os.path.splitext(filename)
        if force_webp:
            ext = ".webp"
        final_filename = f"{base_filename}{ext}"
        image_path = os.path.join(output_dir, final_filename)

        # Always process via Pillow so callers get consistent sizing/format
        image_bytes = io.BytesIO(image_data)
        with Image.open(image_bytes) as img:
            original_format = img.format
            # P (palette) mode may carry transparency, so treat it as alpha-capable.
            has_alpha = img.mode in ("RGBA", "LA", "P")
            convert_mode = "RGBA" if has_alpha else "RGB"
            processed_img = img.convert(convert_mode)

            if max_width and processed_img.width > max_width:
                ratio = max_width / float(processed_img.width)
                new_size = (max_width, int(processed_img.height * ratio))
                processed_img = processed_img.resize(new_size, Image.Resampling.LANCZOS)

            save_kwargs = {}
            if force_webp:
                save_kwargs.update({"format": "WEBP", "quality": 80, "method": 6})
            else:
                candidate_format = original_format or ext.lstrip(".").upper()
                # Pillow's format name is "JPEG"; an extension-derived "JPG"
                # (or "JPE") would raise KeyError in Image.save.
                if candidate_format in ("JPG", "JPE"):
                    candidate_format = "JPEG"
                save_kwargs["format"] = candidate_format if candidate_format else "PNG"
                # JPEG cannot encode an alpha channel; flatten to RGB first.
                if save_kwargs["format"] == "JPEG" and processed_img.mode == "RGBA":
                    processed_img = processed_img.convert("RGB")

            buffer = io.BytesIO()
            processed_img.save(buffer, **save_kwargs)
            processed_bytes = buffer.getvalue()
            content_type = f"image/{str(save_kwargs.get('format', 'PNG')).lower()}"
            image_path = write_bytes(image_path, processed_bytes, content_type=content_type)

            thumbnail_path = None
            thumbnail_filename = None
            if thumbnail_size:
                # Thumbnail failures are non-fatal: the primary asset is already saved.
                try:
                    # Center-crop to a square, then scale to the requested edge.
                    thumb = ImageOps.fit(
                        processed_img,
                        (thumbnail_size, thumbnail_size),
                        Image.Resampling.LANCZOS,
                        centering=(0.5, 0.5)
                    )
                    thumb_filename = f"{base_filename}-sm.webp"
                    thumbnail_path = os.path.join(output_dir, thumb_filename)
                    thumb_buffer = io.BytesIO()
                    thumb.save(thumb_buffer, format="WEBP", quality=80, method=6)
                    thumbnail_path = write_bytes(
                        thumbnail_path,
                        thumb_buffer.getvalue(),
                        content_type="image/webp",
                    )
                    thumbnail_filename = thumb_filename
                except Exception as thumb_exc:
                    print(f"⚠️ Failed to create thumbnail for {final_filename}: {thumb_exc}")
                    thumbnail_path = None
                    thumbnail_filename = None

        # Save metadata if provided
        metadata_path = None
        if metadata:
            metadata_filename = f"{base_filename}_metadata.json"
            metadata_path = os.path.join(output_dir, metadata_filename)
            metadata_path = write_json(metadata_path, metadata)

        return {
            "success": True,
            "image_path": image_path,
            "metadata_path": metadata_path,
            "filename": final_filename,
            "size_bytes": len(processed_bytes),
            "thumbnail_path": thumbnail_path,
            "thumbnail_filename": thumbnail_filename,
            "format": save_kwargs.get("format")
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e),
            "filename": filename
        }


def save_large_variant(
    image_data: bytes,
    final_filename: str,
    output_dir: str,
    *,
    target_width: int = 1600
) -> Tuple[Optional[str], Optional[str]]:
    """
    Save a large webp variant ("-lg.webp") alongside the primary asset.

    Args:
        image_data: Binary image data of the original asset
        final_filename: Filename of the primary asset (extension is replaced)
        output_dir: Directory to save the variant
        target_width: Maximum width of the variant in pixels

    Returns:
        Tuple of (variant filename, variant path), or (None, None) when
        final_filename is empty or saving fails (failure is logged, not raised).
    """
    if not final_filename:
        return None, None

    base_name, _ = os.path.splitext(final_filename)
    large_filename = f"{base_name}-lg.webp"

    large_result = save_generated_image(
        image_data=image_data,
        filename=large_filename,
        output_dir=output_dir,
        metadata=None,
        max_width=target_width,
        force_webp=True,
        thumbnail_size=None
    )

    # A missing "success" key must count as failure, not success
    # (the previous default of True inverted that).
    if not large_result.get("success", False):
        print(f"    ⚠️ Failed to save large variant for {final_filename}: {large_result.get('error')}")
        return None, None

    return large_result.get("filename", large_filename), large_result.get("image_path")

def generate_and_save_image(
    description: str,
    item_id: str,
    output_dir: str,
    context: str = "",
    overwrite: bool = False,
    model: Optional[str] = None,
    size: str = "1024x1024",
    quality: str = "low",
    provider: str = "open-ai",
    create_large_variant: bool = False,
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Complete workflow: generate image and save to file.

    Args:
        description: Description of the image to generate
        item_id: Unique identifier for the item (used in filename)
        output_dir: Directory to save the image
        context: Additional context for image generation
        overwrite: Whether to overwrite existing files (existing outputs are
            backed up with a timestamp suffix before being replaced)
        model: Image model to use for the selected provider (defaults per provider)
        size: Image size label (provider-specific)
        quality: Image quality - "low", "medium", or "high"
        provider: Image provider ("open-ai" or "google")
        create_large_variant: Whether to also save a "-lg.webp" large variant
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with complete generation and save result
    """
    # Generate safe filename and derived output paths
    safe_id = slugify(item_id)
    filename = f"{safe_id}.png"
    image_path = os.path.join(output_dir, filename)
    base_name = os.path.splitext(filename)[0]
    metadata_filename = f"{base_name}_metadata.json"
    metadata_path = os.path.join(output_dir, metadata_filename)
    thumbnail_filename = f"{base_name}-sm.webp"
    thumbnail_path = os.path.join(output_dir, thumbnail_filename)

    def backup_existing_file(path: str) -> None:
        # Best-effort: move the existing file to a timestamped name so an
        # overwrite never destroys the previous output.
        if path_exists(path):
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            base, ext = os.path.splitext(path)
            backup_path = f"{base}-{timestamp}{ext}"
            try:
                if use_local_storage():
                    shutil.move(path, backup_path)
                else:
                    # Remote storage has no atomic move; copy then delete.
                    copy_path(path, backup_path)
                    remove_path(path)
                print(f"  [backup] Renamed existing file to {backup_path}")
            except Exception as move_err:
                print(f"  ⚠️ Failed to back up {path}: {move_err}")

    # Check if image already exists
    if path_exists(image_path) and not overwrite:
        return {
            "success": True,
            "skipped": True,
            "reason": "Image already exists",
            "image_path": image_path,
            "item_id": item_id,
            "filename": filename
        }
    elif path_exists(image_path) and overwrite:
        # Backup existing outputs before overwriting
        backup_existing_file(image_path)
        backup_existing_file(metadata_path)
        backup_existing_file(thumbnail_path)

    provider = provider or "open-ai"
    if model is None:
        model = "gemini-2.5-flash-image" if provider == "google" else "gpt-image-1"

    # Generate the image with the selected provider
    if provider == "google":
        generation_result = generate_image_with_google(
            description=description,
            context=context,
            model=model,
            size=size,
            quality=quality,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )
    elif provider == "open-ai":
        generation_result = generate_image_with_openai(
            description=description,
            context=context,
            model=model,
            size=size,
            quality=quality,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )
    else:
        return {
            "success": False,
            "item_id": item_id,
            "error": f"Unsupported image provider '{provider}'"
        }

    if not generation_result["success"]:
        return {
            "success": False,
            "item_id": item_id,
            "error": generation_result["error"]
        }

    # Attach provider metadata for downstream logging
    generation_result["provider"] = provider

    # Save the image
    save_result = save_generated_image(
        image_data=generation_result["image_data"],
        filename=filename,
        output_dir=output_dir,
        metadata={
            "item_id": item_id,
            "description": description,
            "context": context,
            "generation_params": {
                "provider": provider,
                "model": generation_result["model"],
                "size": generation_result["size"],
                "quality": generation_result["quality"],
                # .get(): providers may omit "style" (Google sets it to None).
                "style": generation_result.get("style")
            },
            "prompt": generation_result["prompt"],
            "revised_prompt": generation_result.get("revised_prompt"),
            # datetime is imported at module level; no dynamic __import__ needed.
            "generated_at": datetime.datetime.now().isoformat()
        }
    )

    large_filename = None
    large_path = None
    if save_result["success"] and create_large_variant:
        large_filename, large_path = save_large_variant(
            image_data=generation_result["image_data"],
            final_filename=save_result["filename"],
            output_dir=output_dir
        )

    if save_result["success"]:
        return {
            "success": True,
            "item_id": item_id,
            "description": description,
            "image_path": save_result["image_path"],
            "metadata_path": save_result["metadata_path"],
            "filename": filename,
            "size_bytes": save_result["size_bytes"],
            # Thumbnail keys included for parity with edit_and_save_image.
            "thumbnail_filename": save_result.get("thumbnail_filename"),
            "thumbnail_path": save_result.get("thumbnail_path"),
            "large_filename": large_filename,
            "large_path": large_path,
            "generation_params": generation_result
        }
    else:
        return {
            "success": False,
            "item_id": item_id,
            "error": save_result["error"]
        }


def edit_and_save_image(
    description: str,
    source_image_path: str,
    item_id: str,
    output_dir: str,
    context: str = "",
    overwrite: bool = False,
    model: Optional[str] = None,
    size: str = "1024x1024",
    quality: str = "standard",
    create_large_variant: bool = False,
    create_webp_variant: bool = False,
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Complete workflow: apply image-to-image edit and save to file.

    Args:
        description: Edit instruction for the image
        source_image_path: Local path to the source image (must exist on disk)
        item_id: Unique identifier for the item (used in filename)
        output_dir: Directory to save the edited image
        context: Additional context appended to the edit prompt
        overwrite: Whether to overwrite existing files (existing outputs are
            backed up with a timestamp suffix before being replaced)
        model: OpenAI image model to use (defaults to "gpt-image-1")
        size: Output image size label
        quality: Quality label; normalized to "low"/"medium"/"high"
        create_large_variant: Whether to also save a "-lg.webp" large variant
        create_webp_variant: Whether to also save a same-size ".webp" variant
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with complete edit and save result
    """
    safe_id = slugify(item_id)
    filename = f"{safe_id}.png"
    image_path = os.path.join(output_dir, filename)
    base_name = os.path.splitext(filename)[0]
    metadata_filename = f"{base_name}_metadata.json"
    metadata_path = os.path.join(output_dir, metadata_filename)
    thumbnail_filename = f"{base_name}-sm.webp"
    thumbnail_path = os.path.join(output_dir, thumbnail_filename)

    def backup_existing_file(path: str) -> None:
        # Best-effort: move the existing file to a timestamped name so an
        # overwrite never destroys the previous output.
        if path_exists(path):
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            base, ext = os.path.splitext(path)
            backup_path = f"{base}-{timestamp}{ext}"
            try:
                if use_local_storage():
                    shutil.move(path, backup_path)
                else:
                    # Remote storage has no atomic move; copy then delete.
                    copy_path(path, backup_path)
                    remove_path(path)
                print(f"  [backup] Renamed existing file to {backup_path}")
            except Exception as move_err:
                print(f"  ⚠️ Failed to back up {path}: {move_err}")

    if path_exists(image_path) and not overwrite:
        return {
            "success": True,
            "skipped": True,
            "reason": "Image already exists",
            "image_path": image_path,
            "item_id": item_id,
            "filename": filename
        }
    elif path_exists(image_path) and overwrite:
        backup_existing_file(image_path)
        backup_existing_file(metadata_path)
        backup_existing_file(thumbnail_path)

    # Source is read from the local filesystem regardless of the storage backend.
    if not os.path.exists(source_image_path):
        return {
            "success": False,
            "item_id": item_id,
            "error": f"Source image not found: {source_image_path}"
        }

    with open(source_image_path, "rb") as f:
        source_bytes = f.read()

    if model is None:
        model = "gpt-image-1"

    # Normalize quality labels to the low/medium/high scale used downstream.
    if quality in ("low", "medium", "high"):
        quality_label = quality
    elif quality in ("hd",):
        quality_label = "high"
    else:
        quality_label = "low"

    edit_result = edit_image_with_openai(
        image_bytes=source_bytes,
        prompt=description,
        context=context,
        model=model,
        size=size,
        quality=quality_label,
        chain_name=chain_name,
        pipeline=pipeline,
        run_id=run_id
    )

    if not edit_result["success"]:
        return {
            "success": False,
            "item_id": item_id,
            "error": edit_result["error"]
        }

    save_result = save_generated_image(
        image_data=edit_result["image_data"],
        filename=filename,
        output_dir=output_dir,
        metadata={
            "item_id": item_id,
            "source_image": source_image_path,
            "description": description,
            "context": context,
            "generation_params": {
                "provider": "open-ai",
                "model": edit_result["model"],
                "size": edit_result["size"],
                "quality": edit_result["quality"]
            },
            "prompt": edit_result["prompt"],
            "revised_prompt": edit_result.get("revised_prompt"),
            # datetime is imported at module level; no dynamic __import__ needed.
            "generated_at": datetime.datetime.now().isoformat()
        }
    )

    large_filename = None
    large_path = None
    if save_result["success"] and create_large_variant:
        large_filename, large_path = save_large_variant(
            image_data=edit_result["image_data"],
            final_filename=save_result["filename"],
            output_dir=output_dir
        )

    webp_filename = None
    webp_path = None
    if save_result["success"] and create_webp_variant:
        base_name = os.path.splitext(save_result["filename"])[0]
        webp_name = f"{base_name}.webp"
        webp_result = save_generated_image(
            image_data=edit_result["image_data"],
            filename=webp_name,
            output_dir=output_dir,
            metadata=None,
            force_webp=True,
            thumbnail_size=None
        )
        if webp_result.get("success"):
            webp_filename = webp_result.get("filename", webp_name)
            webp_path = webp_result.get("image_path")
        else:
            # Variant failures are non-fatal; the primary asset is already saved.
            print(f"  ⚠️ Failed to save webp variant for {save_result['filename']}: {webp_result.get('error')}")

    if save_result["success"]:
        return {
            "success": True,
            "item_id": item_id,
            "description": description,
            "image_path": save_result["image_path"],
            "metadata_path": save_result["metadata_path"],
            "filename": filename,
            "size_bytes": save_result["size_bytes"],
            "thumbnail_filename": save_result.get("thumbnail_filename"),
            "thumbnail_path": save_result.get("thumbnail_path"),
            "large_filename": large_filename,
            "large_path": large_path,
            "webp_filename": webp_filename,
            "webp_path": webp_path,
            "generation_params": edit_result
        }

    return {
        "success": False,
        "item_id": item_id,
        "error": save_result["error"]
    }


def batch_generate_images(
    items: list,
    output_dir: str,
    context: str = "",
    overwrite: bool = False,
    max_concurrent: int = 3,
    provider: str = "open-ai",
    chain_name: str = "",
    pipeline: str = "",
    run_id: str = ""
) -> Dict[str, Any]:
    """
    Generate multiple images in batch with rate limiting.

    Args:
        items: List of dicts with keys: id, description, (optional: context)
        output_dir: Directory to save images
        context: Global context for all images
        overwrite: Whether to overwrite existing files
        max_concurrent: Maximum concurrent generations (rate limiting)
        provider: Image provider ("open-ai" or "google")
        chain_name: Name of the chain calling this function (for cost tracking)
        pipeline: Name of the pipeline (for cost tracking)
        run_id: Run identifier (for cost tracking)

    Returns:
        Dictionary with batch generation results grouped into successful,
        failed, and skipped lists plus a summary of counts.
    """
    import concurrent.futures
    import time

    results = {
        "total_requested": len(items),
        "successful": [],
        "failed": [],
        "skipped": [],
        "summary": {}
    }

    def run_single(entry):
        """Generate one item's image, pausing briefly to soften rate limits."""
        time.sleep(0.5)
        return generate_and_save_image(
            description=entry["description"],
            item_id=entry["id"],
            output_dir=output_dir,
            context=entry.get("context", context),
            overwrite=overwrite,
            provider=provider,
            chain_name=chain_name,
            pipeline=pipeline,
            run_id=run_id
        )

    # A bounded thread pool gives controlled concurrency across items.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrent) as pool:
        pending = {pool.submit(run_single, entry): entry for entry in items}

        for finished in concurrent.futures.as_completed(pending):
            source_item = pending[finished]
            try:
                outcome = finished.result()
            except Exception as exc:
                # Worker crashed outright; record a synthetic failure entry.
                results["failed"].append({
                    "item_id": source_item["id"],
                    "error": str(exc),
                    "success": False
                })
                continue

            # Classify the structured result into one of the three buckets.
            if not outcome["success"]:
                results["failed"].append(outcome)
            elif outcome.get("skipped"):
                results["skipped"].append(outcome)
            else:
                results["successful"].append(outcome)

    # Compile the aggregate summary for callers/logging.
    results["summary"] = {
        "total_requested": len(items),
        "successful_count": len(results["successful"]),
        "failed_count": len(results["failed"]),
        "skipped_count": len(results["skipped"]),
        "success_rate": len(results["successful"]) / len(items) * 100 if items else 0
    }

    return results


def create_image_gallery_html(
    images_dir: str,
    output_file: str,
    title: str = "Generated Images"
) -> str:
    """
    Create an HTML gallery of generated images for easy viewing.

    Args:
        images_dir: Directory containing the images
        output_file: Output HTML file path
        title: Gallery title

    Returns:
        Path to the created HTML file
    """
    import glob
    from html import escape

    # Find all image files
    image_extensions = ["*.png", "*.jpg", "*.jpeg", "*.webp"]
    image_files = []

    for ext in image_extensions:
        image_files.extend(glob.glob(os.path.join(images_dir, ext)))

    # Sort by filename
    image_files.sort()

    # Escape the title so characters like & or < don't break the markup.
    safe_title = escape(title)

    # Generate HTML
    html_content = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{safe_title}</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        .gallery {{ display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 20px; }}
        .image-card {{ border: 1px solid #ddd; border-radius: 8px; padding: 10px; }}
        .image-card img {{ width: 100%; height: auto; border-radius: 4px; }}
        .image-title {{ margin: 10px 0 5px 0; font-weight: bold; }}
        .image-meta {{ font-size: 0.9em; color: #666; }}
    </style>
</head>
<body>
    <h1>{safe_title}</h1>
    <div class="gallery">
"""

    for image_path in image_files:
        rel_path = os.path.relpath(image_path, os.path.dirname(output_file))
        filename = os.path.basename(image_path)
        name = os.path.splitext(filename)[0]

        # Try to load metadata if it exists
        metadata_path = os.path.join(os.path.dirname(image_path), f"{name}_metadata.json")
        description = "No description available"

        if path_exists(metadata_path):
            # Metadata is optional; skip unreadable/corrupt files rather than
            # failing the whole gallery. (Narrowed from a bare `except:` so
            # KeyboardInterrupt/SystemExit still propagate.)
            try:
                metadata = read_json(metadata_path)
                description = metadata.get('description', description)
            except Exception:
                pass

        # Escape everything interpolated into the markup so odd filenames or
        # descriptions can't break (or inject into) the generated HTML.
        html_content += f"""
        <div class="image-card">
            <img src="{escape(rel_path, quote=True)}" alt="{escape(name, quote=True)}" />
            <div class="image-title">{escape(name)}</div>
            <div class="image-meta">{escape(description)}</div>
        </div>
"""

    html_content += """
    </div>
</body>
</html>"""

    # Save HTML file
    ensure_dir(os.path.dirname(output_file))
    return write_text(output_file, html_content, content_type="text/html; charset=utf-8")
