import os
from typing import Any, Dict, List, Optional
from dotenv import load_dotenv
import httpx
import json
import asyncio

load_dotenv()

class GroqLLM:
    """
    A wrapper for the Groq chat-completions API to be used with crewAI.

    Provides both async (`_agenerate`) and sync (`generate` / `_sync_generate`)
    entry points, plus `invoke` / `__call__` for crewAI's expected LLM interface.
    Requires the GROQ_API_KEY environment variable to be set.
    """

    def __init__(self, model_name: str = "llama3-70b-8192",
                 temperature: float = 0.7, timeout: float = 60.0):
        """
        Initialize the Groq LLM.

        Args:
            model_name: The Groq model to use
            temperature: The temperature for generation (0.0 to 1.0)
            timeout: HTTP request timeout in seconds (prevents hung requests)

        Raises:
            ValueError: If GROQ_API_KEY is not set in the environment.
        """
        self.api_key = os.getenv("GROQ_API_KEY", "")
        if not self.api_key:
            raise ValueError("GROQ_API_KEY environment variable not set")

        self.model_name = model_name
        self.temperature = temperature
        self.timeout = timeout
        # No placeholders needed, so a plain string literal (was an f-string).
        self.api_url = "https://api.groq.com/openai/v1/chat/completions"

    def _build_headers(self) -> Dict[str, str]:
        """Build the HTTP headers for a Groq API request."""
        return {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

    def _build_payload(self, prompt: str) -> Dict[str, Any]:
        """Build the chat-completions request body for a single user prompt."""
        return {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": self.temperature,
        }

    @staticmethod
    def _parse_response(response_data: Dict[str, Any]) -> str:
        """
        Extract the generated text from a Groq chat-completions response.

        Raises:
            RuntimeError: If the response does not contain a message content.
        """
        if "choices" in response_data and len(response_data["choices"]) > 0:
            choice = response_data["choices"][0]
            if "message" in choice and "content" in choice["message"]:
                return choice["message"]["content"]

        raise RuntimeError("Failed to parse response from Groq API")

    async def _agenerate(self, prompt: str) -> str:
        """
        Generate a response from Groq asynchronously.

        Args:
            prompt: The prompt to send to Groq

        Returns:
            The generated response

        Raises:
            RuntimeError: On a non-200 API response or an unparsable body.
        """
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                self.api_url,
                json=self._build_payload(prompt),
                headers=self._build_headers(),
            )

        if response.status_code != 200:
            raise RuntimeError(f"Error from Groq API: {response.text}")

        return self._parse_response(response.json())

    def generate(self, prompt: str) -> str:
        """
        Generate a response from Groq (synchronous wrapper).

        If called from inside a running event loop, falls back to a blocking
        HTTP request (we cannot nest `asyncio.run`); otherwise drives the
        async implementation with `asyncio.run`.

        Args:
            prompt: The prompt to send to Groq

        Returns:
            The generated response
        """
        try:
            # get_running_loop() raises RuntimeError when no loop is running;
            # the deprecated get_event_loop() pattern is unreliable on 3.10+.
            asyncio.get_running_loop()
        except RuntimeError:
            # No running loop: safe to drive the coroutine ourselves.
            return asyncio.run(self._agenerate(prompt))
        # Already inside an event loop: use a plain blocking request instead.
        return self._sync_generate(prompt)

    def _sync_generate(self, prompt: str) -> str:
        """
        Generate a response from Groq using a blocking HTTP request.

        Uses httpx's synchronous client (already a module dependency) instead
        of pulling in `requests` as a hidden function-level import.

        Args:
            prompt: The prompt to send to Groq

        Returns:
            The generated response

        Raises:
            RuntimeError: On a non-200 API response or an unparsable body.
        """
        with httpx.Client(timeout=self.timeout) as client:
            response = client.post(
                self.api_url,
                json=self._build_payload(prompt),
                headers=self._build_headers(),
            )

        if response.status_code != 200:
            raise RuntimeError(f"Error from Groq API: {response.text}")

        return self._parse_response(response.json())

    # Methods to make this compatible with crewAI's expected LLM interface
    def invoke(self, prompt: str, **kwargs) -> str:
        """
        Invoke the LLM with a prompt (crewAI compatibility).

        Args:
            prompt: The prompt to send to the LLM

        Returns:
            The generated response
        """
        return self.generate(prompt)

    def __call__(self, prompt: str, **kwargs) -> Dict[str, Any]:
        """
        Call the LLM with a prompt (crewAI compatibility).

        Args:
            prompt: The prompt to send to the LLM

        Returns:
            A dictionary with the generated response under the "output" key
        """
        response = self.generate(prompt)
        return {"output": response}
