"""Utility for setting up and interacting with Chroma vectorstore for RAG."""

from __future__ import annotations

import math
import os
from pathlib import Path
from typing import Iterable, List, Optional

from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

# Embedding and batch-size defaults below can be overridden with environment variables.
DEFAULT_COLLECTION = "lumabit_lessons"
DEFAULT_EMBED_MODEL = os.getenv("RAG_EMBED_MODEL", "text-embedding-3-small")
DEFAULT_EMBED_BATCH_SIZE = int(os.getenv("RAG_EMBED_BATCH_SIZE", "256"))
DEFAULT_VECTORSTORE_BATCH_SIZE = int(os.getenv("RAG_VECTORSTORE_BATCH_SIZE", "500"))
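
# Example shell override (the model choice and script name are illustrative):
#   RAG_EMBED_MODEL=text-embedding-3-large RAG_EMBED_BATCH_SIZE=128 python my_ingest.py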


def _batched(iterable: Iterable[Document], batch_size: int) -> Iterable[List[Document]]:
    """Yield successive batches from an iterable."""

    batch: List[Document] = []
    for item in iterable:
        batch.append(item)
        if len(batch) >= batch_size:
            yield batch
            batch = []
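    # Flush any remainder; the final batch may be smaller than batch_size
    # (e.g. five items with batch_size=2 yield batches of 2, 2, and 1).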
    if batch:
        yield batch


def get_vectorstore(
    collection_name: Optional[str] = None,
    persist_directory: Optional[str] = None,
    embedding_model: Optional[str] = None,
    embedding_batch_size: Optional[int] = None,
) -> Chroma:
    """Get a Chroma vectorstore instance configured with OpenAI embeddings."""

    collection = collection_name or DEFAULT_COLLECTION

    # If no persist directory is provided, derive a default from the collection name
    if not persist_directory:
        persist_directory = f".chroma_{collection}"

    # Ensure the directory exists
    Path(persist_directory).mkdir(parents=True, exist_ok=True)

    # Initialize the embeddings via the maintained langchain_openai package;
    # chunk_size caps how many texts are sent per embedding API request.
    embeddings = OpenAIEmbeddings(
        model=embedding_model or DEFAULT_EMBED_MODEL,
        chunk_size=embedding_batch_size or DEFAULT_EMBED_BATCH_SIZE,
    )

    # Create and return the vectorstore
    return Chroma(
        collection_name=collection,
        embedding_function=embeddings,
        persist_directory=persist_directory,
    )
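
# Typical retrieval usage (a sketch; the query text and k are illustrative):
#
#     store = get_vectorstore()
#     hits = store.similarity_search("how do embeddings work?", k=4)
#     retriever = store.as_retriever(search_kwargs={"k": 4})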


def add_documents_to_vectorstore(
    documents: List[Document],
    collection_name: Optional[str] = None,
    persist_directory: Optional[str] = None,
    embedding_model: Optional[str] = None,
    embedding_batch_size: Optional[int] = None,
    vectorstore_batch_size: Optional[int] = None,
) -> Chroma:
    """Add documents to a Chroma vectorstore in safe batches."""

    if not documents:
        raise ValueError("No documents provided for vectorstore ingestion")

    batch_size = vectorstore_batch_size or DEFAULT_VECTORSTORE_BATCH_SIZE
    if batch_size <= 0:
        raise ValueError("vectorstore_batch_size must be a positive integer")

    vectorstore = get_vectorstore(
        collection_name=collection_name,
        persist_directory=persist_directory,
        embedding_model=embedding_model,
        embedding_batch_size=embedding_batch_size,
    )

    total_batches = math.ceil(len(documents) / batch_size)
    for index, batch in enumerate(_batched(documents, batch_size), start=1):
        print(f"[RAG] Adding batch {index}/{total_batches} containing {len(batch)} documents")
        vectorstore.add_documents(batch)

    # langchain_chroma auto-persists when backed by a persistent directory, but the
    # legacy Chroma wrappers expose an explicit persist() hook; call it defensively
    # so data also survives on those older versions.
    if hasattr(vectorstore, "persist"):
        vectorstore.persist()
    elif hasattr(vectorstore, "_client") and hasattr(vectorstore._client, "persist"):
        vectorstore._client.persist()
    return vectorstore


def create_document_from_text(
    text: str,
    metadata: Optional[dict] = None,
) -> Document:
    """Create a Document from text and optional metadata."""

    return Document(
        page_content=text,
        metadata=metadata or {},
    )
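

if __name__ == "__main__":
    # Minimal end-to-end sketch: the sample texts, metadata, and query below are
    # illustrative only, and running it requires OPENAI_API_KEY to be set because
    # OpenAIEmbeddings calls the OpenAI API.
    sample_docs = [
        create_document_from_text(
            "Embeddings map text to vectors so that similar passages land close together.",
            metadata={"lesson": "embeddings-101"},
        ),
        create_document_from_text(
            "Chroma stores those vectors on disk and answers nearest-neighbor queries.",
            metadata={"lesson": "vectorstores-101"},
        ),
    ]
    store = add_documents_to_vectorstore(sample_docs)
    for hit in store.similarity_search("How do embeddings work?", k=1):
        print(f"[RAG] Hit: {hit.metadata} :: {hit.page_content[:60]}")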
