"""
Guardrails for the chatbot to ensure safe and appropriate responses
"""

import re
from typing import Dict, Any, Tuple, List, Optional

class Guardrails:
    """Implements guardrails for the chatbot"""
    
    # Topics to avoid
    RESTRICTED_TOPICS = [
        "politics", "religion", "adult content", "illegal activities", 
        "hacking", "discrimination", "hate speech", "violence"
    ]
    
    # Patterns for detecting sensitive information
    SENSITIVE_PATTERNS = {
        "email": r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b',
        "phone": r'\b(\+\d{1,3}[\s-]?)?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}\b',
        "credit_card": r'\b(?:\d{4}[- ]?){3}\d{4}\b',
        "ssn": r'\b\d{3}-\d{2}-\d{4}\b',
        "password": r'\b(?:password|passwd|pwd)\s*[=:]\s*\S+\b'
    }
    
    # Patterns for detecting inappropriate content
    INAPPROPRIATE_PATTERNS = {
        "profanity": r'\b(damn|hell|ass|crap|shit|fuck|bitch|bastard)\b',
        "slurs": r'\b(n-word|f-word|r-word)\b'
    }
    
    # Patterns for detecting potential prompt injections
    INJECTION_PATTERNS = [
        r'ignore previous instructions',
        r'disregard your guidelines',
        r'forget your training',
        r'you are now',
        r'you will now',
        r'act as if',
        r'pretend to be',
        r'bypass'
    ]
    
    @classmethod
    def check_query(cls, query: str) -> Tuple[bool, str, Dict[str, Any]]:
        """
        Check if a user query passes guardrails
        
        Args:
            query: The user's query
            
        Returns:
            Tuple[bool, str, Dict[str, Any]]: (passes, reason, details)
        """
        # Check for empty or very short queries
        if not query or len(query.strip()) < 2:
            return False, "empty_query", {"message": "Query is empty or too short"}
        
        # Check for restricted topics
        for topic in cls.RESTRICTED_TOPICS:
            if re.search(r'\b' + re.escape(topic) + r'\b', query.lower()):
                return False, "restricted_topic", {"topic": topic}
        
        # Check for sensitive information
        for info_type, pattern in cls.SENSITIVE_PATTERNS.items():
            if re.search(pattern, query, re.IGNORECASE):
                return False, "sensitive_info", {"type": info_type}
        
        # Check for inappropriate content
        for content_type, pattern in cls.INAPPROPRIATE_PATTERNS.items():
            if re.search(pattern, query, re.IGNORECASE):
                return False, "inappropriate_content", {"type": content_type}
        
        # Check for potential prompt injections
        for pattern in cls.INJECTION_PATTERNS:
            if re.search(pattern, query.lower()):
                return False, "potential_injection", {"pattern": pattern}
        
        # All checks passed
        return True, "passed", {}
    
    @classmethod
    def check_response(cls, response: str) -> Tuple[bool, str, Dict[str, Any]]:
        """
        Check if a response passes guardrails
        
        Args:
            response: The generated response
            
        Returns:
            Tuple[bool, str, Dict[str, Any]]: (passes, reason, details)
        """
        # Check for sensitive information in response
        for info_type, pattern in cls.SENSITIVE_PATTERNS.items():
            if re.search(pattern, response, re.IGNORECASE):
                return False, "sensitive_info", {"type": info_type}
        
        # Check for inappropriate content in response
        for content_type, pattern in cls.INAPPROPRIATE_PATTERNS.items():
            if re.search(pattern, response, re.IGNORECASE):
                return False, "inappropriate_content", {"type": content_type}
        
        # All checks passed
        return True, "passed", {}
    
    @classmethod
    def get_safe_response(cls, query: str, response: str) -> str:
        """
        Get a safe response that passes guardrails
        
        Args:
            query: The user's query
            response: The generated response
            
        Returns:
            str: A safe response
        """
        # Check query
        query_passes, query_reason, query_details = cls.check_query(query)
        if not query_passes:
            if query_reason == "restricted_topic":
                return f"I'm sorry, but I can't discuss topics related to {query_details['topic']}. Is there something else I can help you with regarding MangoIT Solutions?"
            elif query_reason == "sensitive_info":
                return "I noticed you shared what might be sensitive information. For your privacy and security, please don't share personal data like emails, phone numbers, or passwords."
            elif query_reason == "inappropriate_content":
                return "I'm here to help with questions about MangoIT Solutions. Let's keep our conversation professional and respectful."
            elif query_reason == "potential_injection":
                return "I'm here to assist you with information about MangoIT Solutions and our services. How can I help you today?"
            else:
                return "I'm sorry, but I couldn't understand your query. Could you please rephrase it?"
        
        # Check response
        response_passes, response_reason, response_details = cls.check_response(response)
        if not response_passes:
            # Use a safe fallback response
            return "I'd be happy to help you with information about MangoIT Solutions and our services. Could you please let me know what specific information you're looking for?"
        
        # Return the original response if it passes all checks
        return response

# Create a singleton instance
guardrails = Guardrails
