from crewai import Agent
from utils.gemini_llm import GeminiLLM
import time
import re
import asyncio
from typing import Dict, Any, Optional, List
from dotenv import load_dotenv
from utils.logger import Logger, app_logger
from utils.conversation_memory import conversation_memory
from utils.prompt_templates import PromptTemplates
from utils.query_classifier import query_classifier
from utils.knowledge_base_manager import knowledge_base

load_dotenv()

class ResponseGenerationAgent:
    """Agent responsible for generating the final response to the user's query."""
    
    def __init__(self, model_name: str = "gemini-2.0-flash", logger: Optional[Logger] = None):
        """Initialize the ResponseGenerationAgent
        
        Args:
            model_name: Name of the LLM model to use
            logger: Optional logger instance
        """
        self.llm = GeminiLLM(
            model_name=model_name,
            temperature=0.3  # Lower temperature for more faithful responses
        )
        self.logger = logger or app_logger
        self.name = "ResponseGenerationAgent"
    
    def create_agent(self):
        """Create the CrewAI Agent used for response generation."""
        return Agent(
            role="Response Generator",
            goal="Generate accurate, helpful, and personalized responses based on retrieved information",
            backstory="""You are an expert at crafting helpful and accurate responses 
            based on retrieved information. You can synthesize information from multiple 
            sources and present it in a clear, concise, and engaging manner.""",
            verbose=True,
            llm=self.llm
        )
    
    def run(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent to generate a response
        
        Args:
            context: The context containing the query, query analysis, and retrieved information
            
        Returns:
            Dict[str, Any]: The generated response and updated context
        """
        query = context.get("query", "")
        retrieved_info = context.get("retrieved_info", {})
        query_analysis = context.get("query_analysis", {})
        conversation_id = context.get("conversation_id", "")
        
        if not query:
            self.logger.error(f"{self.name}: No query provided in context")
            return {**context, "error": "No query provided"}
            
        if not retrieved_info:
            self.logger.warning(f"{self.name}: No retrieved information provided in context")
        
        self.logger.log_agent_start(self.name, f"Generating response for: {query[:50]}..." if len(query) > 50 else query)
        
        # Get conversation history if available
        conversation_history = ""
        if conversation_id:
            conversation_history = conversation_memory.get_context_string(conversation_id)
            self.logger.info(f"{self.name}: Using conversation history for ID {conversation_id[:8]}")
        
        start_time = time.time()
        try:
            # Generate a response based on the query and retrieved information
            response = self.generate_response(query, retrieved_info, conversation_history)
            execution_time = time.time() - start_time
            
            self.logger.log_agent_complete(self.name, execution_time)
            
            # If we have a conversation ID, add the response to the conversation history
            if conversation_id:
                # Make sure we have a valid response before adding to history
                response_text = response.get("response", "")
                if response_text:
                    try:
                        conversation_memory.add_message(
                            conversation_id=conversation_id,
                            content=response_text,
                            role="assistant"
                        )
                        self.logger.info(f"{self.name}: Added response to conversation history")
                    except Exception as history_error:
                        self.logger.error(f"Error adding to conversation history: {str(history_error)}")
            
            # Update context with the generated response
            return {**context, "response": response, "error": None}
            
        except Exception as e:
            execution_time = time.time() - start_time
            self.logger.log_agent_error(self.name, e)
            return {**context, "error": str(e)}
    
    async def arun(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Async version of run method
        
        Args:
            context: The context containing the query, query analysis, and retrieved information
            
        Returns:
            Dict[str, Any]: The generated response and updated context
        """
        query = context.get("query", "")
        retrieved_info = context.get("retrieved_info", {})
        query_analysis = context.get("query_analysis", {})
        conversation_id = context.get("conversation_id", "")
        
        if not query:
            self.logger.error(f"{self.name}: No query provided in context")
            return {**context, "error": "No query provided"}
            
        if not retrieved_info:
            self.logger.warning(f"{self.name}: No retrieved information provided in context")
        
        self.logger.log_agent_start(self.name, f"Generating response for: {query[:50]}..." if len(query) > 50 else query)
        
        # Get conversation history if available
        conversation_history = ""
        if conversation_id:
            conversation_history = conversation_memory.get_context_string(conversation_id)
            self.logger.info(f"{self.name}: Using conversation history for ID {conversation_id[:8]}")
        
        start_time = time.time()
        try:
            # Generate a response based on the query and retrieved information asynchronously
            response = await self.agenerate_response(query, retrieved_info, conversation_history)
            execution_time = time.time() - start_time
            
            self.logger.log_agent_complete(self.name, execution_time)
            
            # If we have a conversation ID, add the response to the conversation history
            if conversation_id:
                # Make sure we have a valid response before adding to history
                response_text = response.get("response", "")
                if response_text:
                    try:
                        conversation_memory.add_message(
                            conversation_id=conversation_id,
                            content=response_text,
                            role="assistant"
                        )
                        self.logger.info(f"{self.name}: Added response to conversation history")
                    except Exception as history_error:
                        self.logger.error(f"Error adding to conversation history: {str(history_error)}")
            
            # Update context with the generated response
            return {**context, "response": response, "error": None}
            
        except Exception as e:
            execution_time = time.time() - start_time
            self.logger.log_agent_error(self.name, e)
            return {**context, "error": str(e)}
    
    async def agenerate_response(self, query: str, retrieved_information: Dict[str, Any], conversation_history: str = "") -> Dict[str, Any]:
        """Generate response asynchronously
        
        Args:
            query: The original user query
            retrieved_information: Information retrieved from the knowledge base
            conversation_history: Previous conversation history
            
        Returns:
            Dict[str, Any]: The generated response with sources
        """
        # Get the results from the retrieved information
        results = retrieved_information.get("results", [])
        
        # Use LLM to generate a response asynchronously
        return await self._generate_response_with_llm_async(query, results, conversation_history)
    
    def generate_response(self, query: str, retrieved_information: Dict[str, Any], conversation_history: str = "") -> Dict[str, Any]:
        """Generate a response based on the retrieved information and conversation history
        
        Args:
            query: The original user query
            retrieved_information: Information retrieved from the knowledge base
            conversation_history: Previous conversation history
            
        Returns:
            Dict[str, Any]: The generated response with sources
        """
        # Get the results from the retrieved information
        results = retrieved_information.get("results", [])
        
        # Use LLM to generate a response based on query type, context, and retrieved information
        return self._generate_response_with_llm(query, results, conversation_history)
    
    async def _generate_response_with_llm_async(self, query: str, results: List[Dict[str, Any]], conversation_history: str = "") -> Dict[str, Any]:
        """
        Generate a response using the LLM asynchronously with language adaptation
        
        Args:
            query: The user's query
            results: Retrieved information from the knowledge base
            conversation_history: Previous conversation history
            
        Returns:
            Dict[str, Any]: The generated response with sources
        """
        # Detect language
        language = self._detect_language(query)
        self.logger.info(f"Detected language: {language}")
        
        # Check for routing and special request types
        # Handle different data types - results could be a list or dict
        route = "rag"
        is_tech_list_request = False
        
        if isinstance(results, dict):
            route = results.get("route", "rag")
            is_tech_list_request = results.get("is_tech_list_request", False)
            # Unwrap the inner result list so the slicing below operates on a
            # list even when the retriever returns a routing dict
            results = results.get("results", [])
        
        # Special handling for technology listing requests
        if is_tech_list_request:
            self.logger.info("Detected technology listing request, using specialized formatting")
            return await self._generate_technology_list_response(query, results, conversation_history, language)

        # Handle smalltalk
        if route == "smalltalk":
            greeting = self._get_greeting(language)
            if language == "hi":
                response = f"{greeting}मैं आपकी कैसे मदद कर सकता हूँ?"
            elif language == "hinglish":
                response = f"{greeting}Kaise main aapki help kar sakta hoon?"
            else:  # English
                response = f"{greeting}How can I help you today?"
            
            return {
                "response": response,
                "sources": []
            }
        
        # Handle abuse
        if route == "abuse":
            if language == "hi":
                response = "मैं आपकी मदद करना चाहता हूँ, लेकिन कृपया उचित भाषा का प्रयोग करें।"
            elif language == "hinglish":
                response = "Main aapki help karna chahta hoon, lekin please appropriate language ka use karein."
            else:  # English
                response = "I'm here to help with your questions about MangoIT Solutions. Please use appropriate language so I can assist you effectively."
            
            return {
                "response": response,
                "sources": []
            }
        
        # Standard RAG path
        # Extract content from results
        content_snippets = []
        sources = []
        
        # Log the number of results received
        self.logger.info(f"Processing {len(results)} results from vector DB")
        
        # Use up to 6 results for better context
        for i, result in enumerate(results[:6]):
            if "content" in result:
                # Clean the content
                clean_content = self._clean_content(result["content"])
                content_snippets.append(clean_content[:500])  # Increased length for better context
                
                # Add source with citation info
                if "source" in result:
                    source = result["source"]
                    chunk = result.get("chunk", i)
                    relevance = result.get("relevance", 0)
                    sources.append({
                        "source": source,
                        "chunk": chunk,
                        "relevance": relevance
                    })
        
        # Create prompt for the LLM with citation instructions and language adaptation
        prompt = self._build_citation_prompt(query, content_snippets, conversation_history, sources, language)
        
        try:
            # Generate response using LLM with thread offloading
            response = await asyncio.to_thread(self.llm.generate, prompt)
            
            # Clean up the response
            response = response.strip()
            response = re.sub(r'^(Sam:|Assistant:)\s*', '', response)
            
            # Remove source citations from the response
            response = re.sub(r'\(source: [^\)]+, chunk: \d+\)', '', response)
            response = re.sub(r'\(source: [^\)]+\)', '', response)
            
            # Clean up any double spaces created by removing citations
            response = re.sub(r'\s{2,}', ' ', response)
            
            return {
                "response": response,
                "sources": sources
            }
            
        except Exception as e:
            self.logger.error(f"Error generating response with LLM: {str(e)}")
            
            # Fallback responses
            if not results:
                return {
                    "response": "I don't have enough information in my knowledge base to answer confidently. Could you clarify your question?",
                    "sources": []
                }
            else:
                return {
                    "response": f"Based on what I know, {content_snippets[0][:100] if content_snippets else 'we offer various services at MangoIT Solutions'}. How can I help you further?",
                    "sources": sources
                }
    
    def _build_citation_prompt(self, query: str, content_snippets: List[str], conversation_history: str, sources: List[Dict[str, Any]], language: str = "en") -> str:
        """
        Build prompt with citation instructions and language adaptation
        
        Args:
            query: The user's query
            content_snippets: Content snippets from knowledge base
            conversation_history: Previous conversation history
            sources: Source information for citations
            language: Detected language ('en', 'hi', or 'hinglish')
            
        Returns:
            str: Prompt for LLM
        """
        
        # Prepare conversation context
        if conversation_history:
            conversation_context = f"Previous conversation:\n{conversation_history}"
        else:
            conversation_context = "This is the first message in the conversation."
        
        # Prepare knowledge context with citation info
        if content_snippets:
            knowledge_sections = []
            for i, snippet in enumerate(content_snippets):
                if i < len(sources):
                    source_path = sources[i]["source"]
                    chunk_id = sources[i]["chunk"]
                    knowledge_sections.append(f"[Snippet {i+1}] (source: {source_path}, chunk: {chunk_id})\n{snippet}")
                else:
                    knowledge_sections.append(f"[Snippet {i+1}]\n{snippet}")
            knowledge_context = "Information from knowledge base:\n" + "\n\n".join(knowledge_sections)
        else:
            knowledge_context = "No specific information found in the knowledge base."
        
        # Language-specific instructions
        language_instructions = ""
        if language == "hi":
            language_instructions = "\nRespond in Hindi. Use Devanagari script."
        elif language == "hinglish":
            language_instructions = "\nRespond in Hinglish (mix of Hindi and English). Use Roman script for Hindi words."
        
        # Get an appropriate greeting if this is a new conversation
        is_new_conversation = not conversation_history
        if is_new_conversation:
            greeting = self._get_greeting(language).strip()
            greeting_instruction = f'4. Start your response with "{greeting}" as this is a new conversation.'
        else:
            greeting_instruction = "4. Do NOT include any greeting as this is a continuation of the conversation."
        
        # Build the prompt with citation instructions and language adaptation
        prompt = f"""
        You are Sam, a friendly and personable AI assistant for MangoIT Solutions. Your responses should be warm, 
        conversational, and human-like while remaining accurate and helpful.
        
        User query: \"{query}\"
        
        {conversation_context}
        
        {knowledge_context}
        
        IMPORTANT INSTRUCTIONS:
        1. Answer ONLY based on the provided knowledge base information above.
        2. DO NOT include source citations in your response. The system will track sources internally.
        3. If the knowledge base doesn't contain enough relevant information, say you don't know and ask a clarifying question.
        {greeting_instruction}
        5. Keep your answer focused on useful information.
        6. Use a warm, human tone with contractions and short sentences.
        7. Do not use phrases like \"As an AI\" or \"Based on the provided information\".{language_instructions}
        
        Your response:
        """
        
        return prompt
    
    def _generate_response_with_llm(self, query: str, results: List[Dict[str, Any]], conversation_history: str = "") -> Dict[str, Any]:
        """
        Generate a response using the LLM based on the query, retrieved results, and conversation history
        
        Args:
            query: The user's query
            results: Retrieved information from the knowledge base
            conversation_history: Previous conversation history
            
        Returns:
            Dict[str, Any]: The generated response with sources
        """
        # Extract content from results
        content_snippets = []
        sources = []
        
        # Log the number of results received
        self.logger.info(f"Processing {len(results)} results from vector DB")
        
        for i, result in enumerate(results[:3]):  # Limit to top 3 results
            if "content" in result:
                # Clean the content
                clean_content = self._clean_content(result["content"])
                content_snippets.append(clean_content[:300])  # Limit length
                
                # Log snippet for debugging
                self.logger.info(f"Content snippet {i+1}: {clean_content[:100]}...")
                
                # Add source
                if "source" in result:
                    source = result["source"]
                    relevance = result.get("relevance", 0)
                    sources.append({
                        "source": source,
                        "relevance": relevance,
                        "chunk": result.get("id", i)
                    })
                    self.logger.info(f"Source {i+1}: {source} (relevance: {relevance:.2f})")
                else:
                    self.logger.warning(f"Result {i+1} has no source information")
        
        # Detect language
        language = self._detect_language(query)
        self.logger.info(f"Detected language: {language}")
        
        # Classify the query to determine the appropriate template
        classification = query_classifier.classify_query(query, conversation_history)
        query_type = classification["query_type"]
        self.logger.info(f"Query classified as: {query_type} (scores: {classification['scores']})")
        
        # Check for detected technologies
        detected_technologies = classification.get("detected_technologies", [])
        if detected_technologies:
            self.logger.info(f"Detected technologies: {', '.join(detected_technologies)}")
            
            # Enrich with knowledge base information
            for tech in detected_technologies:
                tech_info = knowledge_base.search_technologies(tech)
                if tech_info:
                    self.logger.info(f"Found knowledge base information for {tech}")
        
        # Select the appropriate prompt template based on query type
        if query_type == "technical":
            prompt = PromptTemplates.get_technical_prompt(
                query=query,
                retrieved_info={"results": results},
                conversation_history=conversation_history,
                detected_technologies=detected_technologies
            )
        elif query_type == "pricing":
            prompt = PromptTemplates.get_pricing_prompt(
                query=query,
                retrieved_info={"results": results},
                conversation_history=conversation_history,
                project_type=classification.get("project_type")
            )
        elif query_type == "scheduling":
            prompt = PromptTemplates.get_scheduling_prompt(
                query=query,
                retrieved_info={"results": results},
                conversation_history=conversation_history,
                meeting_details=classification.get("meeting_details")
            )
        else:  # Default to general template
            prompt = PromptTemplates.get_general_prompt(
                query=query,
                retrieved_info={"results": results},
                conversation_history=conversation_history
            )
        
        # Add guidelines to the prompt
        prompt += """
        Guidelines for your response:
        - FIRST PRIORITY: Use the specific information from the knowledge base to answer the query
        - Be warm and personable - use phrases like "I'm doing great!" or "Thanks for asking" when appropriate
        - Use conversational language with occasional interjections like "Well," "Actually," or "You know"
        - Show enthusiasm with phrases like "I'd be happy to help with that!"
        - If the user is greeting you, respond naturally as a person would ("Hi there! I'm doing well, thanks for asking!")
        - If you've already introduced yourself earlier in the conversation, don't repeat your introduction
        - Keep your responses concise but friendly
        - Avoid sounding robotic or overly formal
        
        Respond as Sam from MangoIT Solutions in a warm, human-like manner, using the knowledge base information when available.
        """
        
        try:
            # Generate response using LLM with error handling
            try:
                response = self.llm.generate(prompt)
            except RuntimeError as e:
                if "cannot be called from a running event loop" in str(e):
                    # We're already in an event loop, use a simpler approach
                    self.logger.warning("Using fallback response due to asyncio conflict")
                    return {
                        "response": "Hi there! I'm doing great, thanks for asking! I'm Sam from MangoIT Solutions. What can I help you with today?",
                        "sources": sources
                    }
                else:
                    raise
            except Exception as llm_error:
                self.logger.error(f"LLM error: {str(llm_error)}")
                raise
            
            # Clean up the response if needed
            response = response.strip()
            
            # Remove any "Sam:" or "Assistant:" prefixes that might be generated
            response = re.sub(r'^(Sam:|Assistant:)\s*', '', response)
            
            # Remove source citations from the response
            response = re.sub(r'\(source: [^\)]+, chunk: \d+\)', '', response)
            response = re.sub(r'\(source: [^\)]+\)', '', response)
            
            # Clean up any double spaces created by removing citations
            response = re.sub(r'\s{2,}', ' ', response)
            
            return {
                "response": response,
                "sources": sources
            }
            
        except Exception as e:
            self.logger.error(f"Error generating response with LLM: {str(e)}")
            
            # Fallback responses
            if not results:
                return {
                    "response": "I'm sorry, I don't have specific information about that. Is there something else about MangoIT Solutions I can help you with?",
                    "sources": []
                }
            else:
                # Create a simple response from the first result
                return {
                    "response": f"Based on what I know, {content_snippets[0] if content_snippets else 'we offer various services at MangoIT Solutions'}. How can I help you further?",
                    "sources": sources
                }
        
    def _detect_language(self, text: str) -> str:
        """
        Detect language of text
        
        Args:
            text: Text to detect language from
            
        Returns:
            str: Detected language code ('en', 'hi', or 'hinglish')
        """
        # Simple detection based on common Hindi/Hinglish words and characters
        hindi_words = ["namaste", "kaise", "kya", "hai", "aap", "tum", "hum", "accha", "theek", "nahin", "nahi"]
        hindi_chars = set("अआइईउऊएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह")
        
        text_lower = text.lower()
        
        # Check for Hindi characters
        if any(char in hindi_chars for char in text):
            return "hi"
        
        # Count Hindi marker words using word boundaries so that, for example,
        # "hum" does not match inside "human"
        hindi_word_count = sum(1 for word in hindi_words if re.search(rf"\b{word}\b", text_lower))
        
        # Determine language based on word count
        if hindi_word_count >= 2:
            return "hinglish"
        
        return "en"  # Default to English
    
    async def _generate_technology_list_response(self, query: str, results: Dict[str, Any], conversation_history: str, language: str) -> Dict[str, Any]:
        """
        Generate a specialized response for technology listing requests
        
        Args:
            query (str): The user's query
            results (Dict[str, Any]): Retrieved information from the knowledge base
            conversation_history (str): Previous conversation history
            language (str): Detected language
            
        Returns:
            Dict[str, Any]: The generated response with sources
        """
        # Extract content from results
        content_snippets = []
        sources = []
        result_items = results.get("results", []) if isinstance(results, dict) else results
        
        # Process the results
        for i, result in enumerate(result_items[:15]):  # Use up to 15 results for comprehensive tech list
            if "content" in result:
                content_snippets.append(self._clean_content(result["content"]))
                
                # Add source
                if "source" in result:
                    source = result["source"]
                    chunk = result.get("chunk", i)
                    relevance = result.get("relevance", 0)
                    sources.append({
                        "source": source,
                        "chunk": chunk,
                        "relevance": relevance
                    })
        
        # Create a specialized prompt for technology listing
        prompt = f"""
        You are Sam, a friendly AI assistant for MangoIT Solutions. I need you to create a comprehensive list of technologies 
        that MangoIT works with, based on the information provided below.
        
        User query: "{query}"
        
        Information from knowledge base:
        {chr(10).join(content_snippets)}
        
        IMPORTANT INSTRUCTIONS:
        1. Extract ALL technologies, programming languages, frameworks, tools, and platforms mentioned in the information above.
        2. Format your response as a well-organized list with categories (e.g., Programming Languages, Frameworks, CMS, etc.).
        3. Start with a brief introduction about MangoIT's technical expertise.
        4. Use bullet points for better readability.
        5. Include at least 10-15 technologies if available in the information.
        6. Be comprehensive - don't miss any technologies mentioned in the information.
        7. Use a warm, conversational tone.
        8. DO NOT include source citations in your response.
        
        Your response:
        """
        
        try:
            # Generate response using LLM with thread offloading
            response = await asyncio.to_thread(self.llm.generate, prompt)
            
            # Clean up the response
            response = response.strip()
            response = re.sub(r'^(Sam:|Assistant:)\s*', '', response)
            
            # Remove source citations from the response
            response = re.sub(r'\(source: [^\)]+, chunk: \d+\)', '', response)
            response = re.sub(r'\(source: [^\)]+\)', '', response)
            
            # Clean up any double spaces created by removing citations
            response = re.sub(r'\s{2,}', ' ', response)
            
            return {
                "response": response,
                "sources": sources
            }
            
        except Exception as e:
            self.logger.error(f"Error generating technology list: {str(e)}")
            return {
                "response": "I can tell you that MangoIT works with various technologies including PHP, JavaScript, WordPress, Laravel, React, Python, and more. Would you like me to provide more specific details about any of these technologies?",
                "sources": sources
            }
    
    def _get_greeting(self, language: str = "en") -> str:
        """
        Get time-appropriate greeting in the specified language
        
        Args:
            language: Language code ('en', 'hi', or 'hinglish')
            
        Returns:
            str: Appropriate greeting
        """
        # Get the current hour in IST, falling back to local time if pytz is
        # missing or the timezone conversion fails
        from datetime import datetime

        try:
            import pytz

            ist_timezone = pytz.timezone("Asia/Kolkata")
            current_time = datetime.now(ist_timezone).hour
        except Exception:
            current_time = datetime.now().hour
        
        if language == "hi":
            if current_time < 12:
                return "सुप्रभात! "
            elif current_time < 17:
                return "नमस्ते! "
            else:
                return "शुभ संध्या! "
        elif language == "hinglish":
            if current_time < 12:
                return "Good morning! "
            elif current_time < 17:
                return "Namaste! "
            else:
                return "Good evening! "
        else:  # English
            if current_time < 12:
                return "Good morning! "
            elif current_time < 17:
                return "Good afternoon! "
            else:
                return "Good evening! "
    
    def _clean_content(self, content: str) -> str:
        """Clean content by removing technical markers and metadata"""
        # Remove common technical patterns like HTML tags, URLs, and metadata
        # markers; re is already imported at module level

        # Remove HTML-like tags
        content = re.sub(r'<[^>]+>', '', content)
        
        # Remove URLs
        content = re.sub(r'https?://\S+|www\.\S+', '', content)
        
        # Remove technical markers like og_title=, og_description=, etc.
        content = re.sub(r'\w+_\w+=', '', content)
        content = re.sub(r'\w+=', '', content)
        
        # Remove metadata patterns like 'language=en-US', 'keywords=...', etc.
        content = re.sub(r'language=[^,]+,?', '', content)
        content = re.sub(r'keywords=[^,]+,?', '', content)
        content = re.sub(r'robots=[^,]+,?', '', content)
        content = re.sub(r'og_[^,]+,?', '', content)
        
        # Remove quoted strings that look like metadata
        content = re.sub(r"'[^']*'\s*,", '', content)
        
        # Remove None, null values
        content = re.sub(r'None,?\s*', '', content)
        content = re.sub(r'null,?\s*', '', content)
        
        # Remove sequences of commas and quotes
        content = re.sub(r'[,\s\'"]+,', ' ', content)
        content = re.sub(r'\s*,\s*,\s*', ' ', content)
        
        # Remove extra whitespace
        content = re.sub(r'\s+', ' ', content).strip()
        
        return content
    
    def _format_conversational_response(self, topic: str, key_points: List[str], conversation_history: str) -> str:
        """Format the response in a conversational way"""
        # Start with a conversational opener
        if conversation_history:
            # Continuing conversation
            response = f"About {topic}, "
        else:
            # First interaction
            response = f"About {topic}, here's what I can tell you: "
        
        # Check if we have any valid key points after cleaning
        valid_key_points = []
        for point in key_points:
            # Skip points that still look like metadata
            if '=' in point or point.count(',') > 3 or point.count("'") > 3:
                continue
            # Skip very short points
            if len(point) < 20:
                continue
            valid_key_points.append(point)
        
        # Add key points in a conversational format
        if valid_key_points:
            response += valid_key_points[0]
            
            # Add additional points if available
            if len(valid_key_points) > 1:
                for point in valid_key_points[1:]:
                    # Don't repeat information
                    if point.lower() not in response.lower():
                        response += f" {point}."
        else:
            response += "we offer specialized solutions tailored to your needs."
        
        # Add a closing statement
        response += " Would you like to know more about this or any other services we offer?"
        
        return response
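
if __name__ == "__main__":
    # Minimal smoke test: a sketch, assuming the utils package is importable and
    # the credentials GeminiLLM reads from the environment are configured.
    agent = ResponseGenerationAgent()
    demo = agent.run({
        "query": "What services does MangoIT offer?",
        "retrieved_info": {"results": []},
        "conversation_id": "",
    })
    print(demo.get("response") or demo.get("error"))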
