import os
from dotenv import load_dotenv
from utils.gemini_llm import GeminiLLM

# Load environment variables
load_dotenv()

# Get LLM configuration from environment variables
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "gemini").lower()  # Default to gemini
LLM_MODEL = os.getenv("LLM_MODEL", "gemini-pro")  # Default model for Gemini
GROQ_MODEL = os.getenv("GROQ_MODEL", "llama3-70b-8192")  # Default model for Groq
LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.2"))  # Default temperature
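# Example .env configuration (illustrative values only; keys must match the
# os.getenv lookups above):
#   LLM_PROVIDER=groq
#   LLM_MODEL=gemini-pro
#   GROQ_MODEL=llama3-70b-8192
#   LLM_TEMPERATURE=0.2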

def get_llm(model_name=None, temperature=None):
    """
    Get the appropriate LLM based on environment variables
    
    Args:
        model_name: Optional override for the model name
        temperature: Optional override for the temperature
        
    Returns:
        An LLM instance compatible with crewAI
    """
    # Use the provided values or fall back to the environment defaults.
    # Compare against None so an explicit temperature of 0.0 is respected
    # (a plain `or` would silently discard it).
    temp = temperature if temperature is not None else LLM_TEMPERATURE
    
    # Select the appropriate LLM based on provider
    if LLM_PROVIDER == "groq":
        try:
            from utils.groq_llm import GroqLLM
            groq_model = model_name or GROQ_MODEL
            return GroqLLM(
                model_name=groq_model,
                temperature=temp
            )
        except ImportError:
            print("Error importing GroqLLM. Falling back to Gemini...")
            return GeminiLLM(model_name=LLM_MODEL, temperature=temp)
        except Exception as e:
            print(f"Error initializing GroqLLM: {str(e)}. Falling back to Gemini...")
            return GeminiLLM(model_name=LLM_MODEL, temperature=temp)
    else:
        # Default to Gemini
        gemini_model = model_name or LLM_MODEL
        return GeminiLLM(
            model_name=gemini_model,
            temperature=temp
        )
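

if __name__ == "__main__":
    # Minimal usage sketch: resolve an LLM from the environment and override
    # the temperature for a more deterministic run. Assumes the GeminiLLM /
    # GroqLLM wrappers in this project's utils package are importable and
    # that the corresponding API keys are set in the environment.
    llm = get_llm(temperature=0.0)
    print(f"Using provider '{LLM_PROVIDER}' with {llm.__class__.__name__}")

    # The returned instance is intended to be handed to a crewAI agent, e.g.:
    #   from crewai import Agent
    #   researcher = Agent(role="Researcher", goal="...", backstory="...", llm=llm)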
