"""
Standardized LLM interface for all supported providers, ensuring consistent
behavior when switching between them. Pydantic models define the standardized
response structures.
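
Example usage (illustrative; assumes the selected provider's API key, e.g.
GROQ_API_KEY, is available via .env or the environment):

    llm = get_llm()                      # provider chosen via LLM_PROVIDER
    text = llm.generate_text("Hello!")   # plain string
    full = llm.generate("Hello!")        # LLMResponse with model/usage/metadata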
"""

import os
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional, List
from pydantic import BaseModel, Field
from dotenv import load_dotenv

load_dotenv()

# Pydantic models for standardized responses
class LLMResponse(BaseModel):
    """Standardized response from any LLM provider"""
    content: str = Field(..., description="The text response from the LLM")
    model: str = Field(..., description="The model used for generation")
    usage: Dict[str, int] = Field(default_factory=dict, description="Token usage information")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata")

class LLMUsage(BaseModel):
    """Token usage information"""
    prompt_tokens: int = Field(default=0, description="Number of tokens in the prompt")
    completion_tokens: int = Field(default=0, description="Number of tokens in the completion")
    total_tokens: int = Field(default=0, description="Total number of tokens used")
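
# LLMUsage is an optional typed view over LLMResponse.usage. Illustrative
# sketch (not wired into the adapters below; get_llm is defined at the end of
# this module):
#
#     response = get_llm().generate("ping")
#     usage = LLMUsage(**response.usage) if response.usage else LLMUsage()
#     print(usage.total_tokens)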

class BaseLLM(ABC):
    """Base class for all LLM providers"""
    
    @abstractmethod
    def generate(self, prompt: str) -> LLMResponse:
        """Generate a response from the LLM"""
        pass
    
    def generate_text(self, prompt: str) -> str:
        """Generate a text response from the LLM (simplified interface)"""
        response = self.generate(prompt)
        return response.content
    
    def invoke(self, prompt: str, **kwargs) -> str:
        """Invoke the LLM (for crewAI compatibility)"""
        return self.generate_text(prompt)
    
    def __call__(self, prompt: str, **kwargs) -> Dict[str, Any]:
        """Call the LLM (for crewAI compatibility)"""
        response = self.generate_text(prompt)
        return {"output": response}


class GeminiLLMAdapter(BaseLLM):
    """Adapter for GeminiLLM"""
    
    def __init__(self, model_name: str = "gemini-pro", temperature: float = 0.2):
        from utils.gemini_llm import GeminiLLM
        self.llm = GeminiLLM(model_name=model_name, temperature=temperature)
        self.model_name = model_name
    
    def generate(self, prompt: str) -> LLMResponse:
        try:
            content = self.llm.generate(prompt)
            # Create a standardized response
            return LLMResponse(
                content=content,
                model=self.model_name,
                usage={},  # Gemini doesn't provide token usage in the current implementation
                metadata={"provider": "gemini"}
            )
        except Exception as e:
            # Handle errors gracefully
            return LLMResponse(
                content=f"Error: {str(e)}",
                model=self.model_name,
                usage={},
                metadata={"provider": "gemini", "error": str(e)}
            )


class GroqLLMAdapter(BaseLLM):
    """Adapter for Groq LLM"""
    
    def __init__(self, model_name: str = "llama3-70b-8192", temperature: float = 0.2):
        try:
            from langchain_groq import ChatGroq
            self.llm = ChatGroq(
                model_name=model_name,
                temperature=temperature,
                groq_api_key=os.getenv("GROQ_API_KEY")
            )
            self.model_name = model_name
        except ImportError:
            raise ImportError("langchain-groq package not installed. Please run: pip install langchain-groq")
    
    def generate(self, prompt: str) -> LLMResponse:
        try:
            response = self.llm.invoke(prompt)
            # Extract token usage if available; LangChain chat models report it
            # via the AIMessage.usage_metadata mapping when the provider returns it
            usage = {}
            usage_metadata = getattr(response, "usage_metadata", None)
            if usage_metadata:
                usage = {
                    'prompt_tokens': usage_metadata.get("input_tokens", 0),
                    'completion_tokens': usage_metadata.get("output_tokens", 0),
                    'total_tokens': usage_metadata.get("total_tokens", 0)
                }
            
            # Create a standardized response
            return LLMResponse(
                content=response.content,
                model=self.model_name,
                usage=usage,
                metadata={"provider": "groq"}
            )
        except Exception as e:
            # Handle errors gracefully
            return LLMResponse(
                content=f"Error: {str(e)}",
                model=self.model_name,
                usage={},
                metadata={"provider": "groq", "error": str(e)}
            )


class OpenAILLMAdapter(BaseLLM):
    """Adapter for OpenAI LLM"""
    
    def __init__(self, model_name: str = "gpt-4o", temperature: float = 0.2):
        try:
            from langchain_openai import ChatOpenAI
            self.llm = ChatOpenAI(
                model=model_name,
                temperature=temperature,
                api_key=os.getenv("OPENAI_API_KEY")
            )
            self.model_name = model_name
        except ImportError:
            raise ImportError("langchain-openai package not installed. Please run: pip install langchain-openai")
    
    def generate(self, prompt: str) -> LLMResponse:
        try:
            response = self.llm.invoke(prompt)
            # Extract token usage if available; LangChain chat models report it
            # via the AIMessage.usage_metadata mapping when the provider returns it
            usage = {}
            usage_metadata = getattr(response, "usage_metadata", None)
            if usage_metadata:
                usage = {
                    'prompt_tokens': usage_metadata.get("input_tokens", 0),
                    'completion_tokens': usage_metadata.get("output_tokens", 0),
                    'total_tokens': usage_metadata.get("total_tokens", 0)
                }
            
            # Create a standardized response
            return LLMResponse(
                content=response.content,
                model=self.model_name,
                usage=usage,
                metadata={"provider": "openai"}
            )
        except Exception as e:
            # Handle errors gracefully
            return LLMResponse(
                content=f"Error: {str(e)}",
                model=self.model_name,
                usage={},
                metadata={"provider": "openai", "error": str(e)}
            )


class AnthropicLLMAdapter(BaseLLM):
    """Adapter for Anthropic LLM"""
    
    def __init__(self, model_name: str = "claude-3-opus-20240229", temperature: float = 0.2):
        try:
            from langchain_anthropic import ChatAnthropic
            self.llm = ChatAnthropic(
                model=model_name,
                temperature=temperature,
                api_key=os.getenv("ANTHROPIC_API_KEY")
            )
            self.model_name = model_name
        except ImportError:
            raise ImportError("langchain-anthropic package not installed. Please run: pip install langchain-anthropic")
    
    def generate(self, prompt: str) -> LLMResponse:
        try:
            response = self.llm.invoke(prompt)
            # Extract token usage if available; LangChain chat models report it
            # via the AIMessage.usage_metadata mapping when the provider returns it
            usage = {}
            usage_metadata = getattr(response, "usage_metadata", None)
            if usage_metadata:
                usage = {
                    'prompt_tokens': usage_metadata.get("input_tokens", 0),
                    'completion_tokens': usage_metadata.get("output_tokens", 0),
                    'total_tokens': usage_metadata.get("total_tokens", 0)
                }
            
            # Create a standardized response
            return LLMResponse(
                content=response.content,
                model=self.model_name,
                usage=usage,
                metadata={"provider": "anthropic"}
            )
        except Exception as e:
            # Handle errors gracefully
            return LLMResponse(
                content=f"Error: {str(e)}",
                model=self.model_name,
                usage={},
                metadata={"provider": "anthropic", "error": str(e)}
            )


def get_llm(provider: Optional[str] = None, model_name: Optional[str] = None, temperature: Optional[float] = None) -> BaseLLM:
    """
    Get an LLM instance based on the specified provider or environment variables
    
    Args:
        provider: LLM provider (gemini, groq, openai, anthropic)
        model_name: Model name to use
        temperature: Temperature for generation
        
    Returns:
        An LLM instance with a standardized interface
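
    Example (illustrative; the selected provider's API key must be available):
        llm = get_llm(provider="openai", temperature=0.0)
        print(llm.generate_text("ping"))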
    """
    # Get configuration from environment variables if not specified
    llm_provider = (provider or os.getenv("LLM_PROVIDER", "gemini")).lower()
    temp = temperature if temperature is not None else float(os.getenv("LLM_TEMPERATURE", "0.2"))
    
    # Select the appropriate LLM based on provider
    try:
        if llm_provider == "groq":
            groq_model = model_name or os.getenv("GROQ_MODEL", "llama3-70b-8192")
            return GroqLLMAdapter(model_name=groq_model, temperature=temp)
        elif llm_provider == "openai":
            openai_model = model_name or os.getenv("OPENAI_MODEL", "gpt-4o")
            return OpenAILLMAdapter(model_name=openai_model, temperature=temp)
        elif llm_provider == "anthropic":
            anthropic_model = model_name or os.getenv("ANTHROPIC_MODEL", "claude-3-opus-20240229")
            return AnthropicLLMAdapter(model_name=anthropic_model, temperature=temp)
        else:
            # Default to Gemini
            gemini_model = model_name or os.getenv("GEMINI_MODEL", "gemini-pro")
            return GeminiLLMAdapter(model_name=gemini_model, temperature=temp)
    except ImportError as e:
        print(f"Error: {str(e)}")
        print("Falling back to Gemini...")
        gemini_model = os.getenv("GEMINI_MODEL", "gemini-pro")
        return GeminiLLMAdapter(model_name=gemini_model, temperature=temp)
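

if __name__ == "__main__":
    # Minimal smoke test (illustrative; requires the selected provider's API
    # key, e.g. GROQ_API_KEY or OPENAI_API_KEY, to be set in the environment).
    llm = get_llm()
    result = llm.generate("Reply with the single word: pong")
    print(f"[{result.model}] {result.content}")
    print(f"usage: {result.usage}")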
