import os
from typing import Any, Dict, List, Optional
from dotenv import load_dotenv
import httpx
import json
import asyncio

load_dotenv()

class GeminiLLM:
    """
    A wrapper for the Gemini API to be used with crewAI.
    This provides a compatible interface for crewAI agents.

    Exposes an async path (httpx) and a sync fallback path (requests),
    plus the ``invoke`` / ``__call__`` entry points crewAI expects.
    """

    def __init__(self, model_name: str = "gemini-2.0-flash",
                 temperature: float = 0.7, timeout: float = 60.0):
        """
        Initialize the Gemini LLM

        Args:
            model_name: The Gemini model to use
            temperature: The temperature for generation (0.0 to 1.0)
            timeout: Seconds to wait for an API response before failing
                (both sync and async paths; prevents indefinite hangs)

        Raises:
            ValueError: if the GEMINI_API_KEY environment variable is unset.
        """
        self.api_key = os.getenv("GEMINI_API_KEY", "")
        if not self.api_key:
            raise ValueError("GEMINI_API_KEY environment variable not set")

        self.model_name = model_name
        self.temperature = temperature
        self.timeout = timeout
        self.api_url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent"

    def _headers(self) -> Dict[str, str]:
        """Build the HTTP headers shared by the sync and async requests."""
        return {
            "Content-Type": "application/json",
            "X-goog-api-key": self.api_key,
        }

    def _build_payload(self, prompt: str) -> Dict[str, Any]:
        """Build the generateContent JSON body shared by both request paths."""
        return {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {
                "temperature": self.temperature,
                "topP": 0.95,
                "topK": 40,
            },
        }

    @staticmethod
    def _parse_response(response_data: Dict[str, Any]) -> str:
        """
        Extract the generated text from a Gemini API response body.

        Args:
            response_data: Decoded JSON body of a generateContent response

        Returns:
            The text of the first part of the first candidate.

        Raises:
            Exception: if the expected candidates/content/parts/text
                structure is missing from the response.
        """
        candidates = response_data.get("candidates") or []
        if candidates:
            content = candidates[0].get("content") or {}
            parts = content.get("parts") or []
            if parts and "text" in parts[0]:
                return parts[0]["text"]

        raise Exception("Failed to parse response from Gemini API")

    async def _agenerate(self, prompt: str) -> str:
        """
        Generate a response from Gemini asynchronously

        Args:
            prompt: The prompt to send to Gemini

        Returns:
            The generated response

        Raises:
            Exception: on non-200 status or an unparseable response body.
        """
        # Timeout bounds the whole request so a stalled server can't hang us.
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                self.api_url,
                json=self._build_payload(prompt),
                headers=self._headers(),
            )

            if response.status_code != 200:
                raise Exception(f"Error from Gemini API: {response.text}")

            return self._parse_response(response.json())

    def generate(self, prompt: str) -> str:
        """
        Generate a response from Gemini (synchronous wrapper)

        Uses asyncio.run() when no event loop is running; falls back to a
        blocking HTTP request when called from inside a running loop,
        where asyncio.run() would raise.

        Args:
            prompt: The prompt to send to Gemini

        Returns:
            The generated response
        """
        try:
            # get_running_loop() is the supported way to detect a live loop;
            # get_event_loop() is deprecated for this and raises on 3.12+
            # when no loop exists.
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running in this thread: safe to drive the async path.
            return asyncio.run(self._agenerate(prompt))

        # Already inside a running loop (e.g. called from async framework
        # code) — asyncio.run() would fail, so block with a sync request.
        return self._sync_generate(prompt)

    def _sync_generate(self, prompt: str) -> str:
        """
        Generate a response from Gemini using synchronous HTTP requests

        Args:
            prompt: The prompt to send to Gemini

        Returns:
            The generated response

        Raises:
            Exception: on non-200 status or an unparseable response body.
        """
        # Imported lazily so the async-only path never needs `requests`.
        import requests

        response = requests.post(
            self.api_url,
            json=self._build_payload(prompt),
            headers=self._headers(),
            timeout=self.timeout,
        )

        if response.status_code != 200:
            raise Exception(f"Error from Gemini API: {response.text}")

        return self._parse_response(response.json())

    # Methods to make this compatible with crewAI's expected LLM interface
    def invoke(self, prompt: str, **kwargs) -> str:
        """
        Invoke the LLM with a prompt (crewAI compatibility)

        Args:
            prompt: The prompt to send to the LLM

        Returns:
            The generated response
        """
        return self.generate(prompt)

    def __call__(self, prompt: str, **kwargs) -> Dict[str, Any]:
        """
        Call the LLM with a prompt (crewAI compatibility)

        Args:
            prompt: The prompt to send to the LLM

        Returns:
            A dictionary with the generated response under the "output" key
        """
        response = self.generate(prompt)
        return {"output": response}
