# src/main.py
import os
import asyncio
import uuid
from datetime import datetime
from pathlib import Path
from typing import List

import chromadb
import uvicorn
from fastapi import (
    FastAPI,
    UploadFile,
    File,
    HTTPException,
    Depends,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import (
    JSONResponse,
    FileResponse,
    StreamingResponse,
    RedirectResponse,
)
from fastapi.security import APIKeyHeader
from fastapi.staticfiles import StaticFiles
from google_auth_oauthlib.flow import Flow
from google.oauth2.credentials import Credentials

from config.config import settings
from src.agents.system_instructions_rag import SystemInstructionsRAGAgent
from src.db.mongodb_store import MongoDBStore
from src.implementations.document_service import DocumentService
from src.models import (
    ChatRequest,
    ChatResponse,
    BatchUploadResponse,
    SummarizeRequest,
    SummaryResponse,
    FeedbackRequest
)
from src.models.UserContact import UserContactRequest
from src.models.document import AllDocumentsResponse, StoredDocument
from src.utils.conversation_summarizer import ConversationSummarizer
from src.utils.database_cleanup import perform_cleanup
from src.utils.document_processor import DocumentProcessor
from src.utils.drive_document_processor import DriveDocumentProcessor
from src.utils.google_drive_service import GoogleDriveService
from src.utils.llm_utils import (
    get_llm_instance,
    get_vector_store,
    cleanup_vectorstore,
)
from src.utils.logger import logger

# Allow OAuth over plain HTTP (intended for local development only)
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# os.environ["OAUTHLIB_RELAX_TOKEN_SCOPE"] = "1"


# Alternative RAG agent, currently unused:
# from src.agents.rag_agent import RAGAgent


app = FastAPI(title="Chatbot API")

app.add_middleware(
    CORSMiddleware,
    # Allowed frontend origins
    allow_origins=[
        "http://localhost:8080",
        "http://localhost:3000",
        "https://talatmasud-chatbot-frontend.static.hf.space",
        "https://chatbot.neurovise.ai",
    ],
    allow_credentials=True,
    allow_methods=["*"],  # Allow all HTTP methods
    allow_headers=["*"],  # Allow all headers
)

# google_drive_service = GoogleDriveService()

# Initialize MongoDB
mongodb = MongoDBStore(settings.MONGODB_URI)

# Create uploads directory if it doesn't exist
# UPLOADS_DIR = Path("uploads")
UPLOADS_DIR = Path(settings.UPLOADS_DIR)
UPLOADS_DIR.mkdir(parents=True, exist_ok=True)

chroma_path = Path(settings.CHROMA_PATH)
chroma_path.mkdir(parents=True, exist_ok=True)
logger.info(f"ChromaDB directory ready at: {chroma_path}")

temp_dir = Path(settings.TEMP_DOWNLOAD_DIR)
temp_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"Temp downloads directory ready at: {temp_dir}")


# Initialize core components
doc_processor = DocumentProcessor()
summarizer = ConversationSummarizer()
document_service = DocumentService(doc_processor, mongodb)

# Mount the uploads directory for static file serving
app.mount("/docs", StaticFiles(directory=str(UPLOADS_DIR)), name="documents")

# Security setup
API_KEY_HEADER = APIKeyHeader(name="ADMIN_API_KEY")


async def verify_api_key(api_key: str = Depends(API_KEY_HEADER)):
    """Verify admin API key"""
    if not settings.ADMIN_API_KEY or api_key != settings.ADMIN_API_KEY:
        raise HTTPException(
            status_code=403,
            detail="Invalid or missing API key"
        )
    return api_key


def get_chroma_client():
    """Get a new ChromaDB client instance"""
    return chromadb.PersistentClient(
        path=settings.CHROMA_PATH,
        settings=chromadb.Settings(
            allow_reset=True,
            is_persistent=True
        )
    )


@app.get("/documents")
async def get_all_documents():
    """Get all documents from MongoDB"""
    try:
        documents = await mongodb.get_all_documents()

        formatted_documents = []
        for doc in documents:
            try:
                formatted_doc = {
                    "document_id": doc.get("document_id"),
                    "filename": doc.get("filename"),
                    "content_type": doc.get("content_type"),
                    "file_size": doc.get("file_size"),
                    "url_path": doc.get("url_path"),
                    "upload_timestamp": doc.get("upload_timestamp"),
                    "source": doc.get("source")
                }
                formatted_documents.append(formatted_doc)
            except Exception as e:
                logger.error(
                    f"Error formatting document {doc.get('document_id', 'unknown')}: {str(e)}")
                continue
        # Sort documents by upload_timestamp in descending order (latest first)
        formatted_documents.sort(
            key=lambda x: x.get("upload_timestamp", datetime.min),
            reverse=True
        )

        return {
            "total_documents": len(formatted_documents),
            "documents": formatted_documents
        }
    except Exception as e:
        logger.error(f"Error retrieving documents: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/documents/{document_id}/download")
async def get_document_file(document_id: str):
    """Serve a document file by its ID"""
    try:
        # Get document info from MongoDB
        doc = await mongodb.get_document(document_id)
        if not doc:
            raise HTTPException(status_code=404, detail="Document not found")

        # Extract filename from url_path
        filename = doc["url_path"].split("/")[-1]
        file_path = UPLOADS_DIR / filename

        if not file_path.exists():
            raise HTTPException(
                status_code=404,
                detail=f"File not found on server: {filename}"
            )

        return FileResponse(
            path=str(file_path),
            filename=doc["filename"],
            media_type=doc["content_type"]
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error serving document file: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/documents/upload", response_model=BatchUploadResponse)
async def upload_documents(
    files: List[UploadFile] = File(...),
    background_tasks: BackgroundTasks = BackgroundTasks()
):
    """Upload and process multiple documents"""
    try:
        vector_store, _ = await get_vector_store()
        response = await document_service.process_documents(
            files,
            vector_store,
            background_tasks
        )
        return response
    except Exception as e:
        logger.error(f"Error in document upload: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/documentChunks")
async def get_all_document_chunks():
    """Get all document chunks from the vector store"""
    try:
        # Get vector store instance
        vector_store, _ = await get_vector_store()

        # Retrieve all documents
        all_documents = vector_store.get_all_documents()

        # If no documents, return a structured response instead of raising an exception
        if not all_documents:
            return {
                "total_documents": 0,
                "documents": [],
                "message": "No documents are currently stored in the vector store. Upload some documents to see chunks."
            }

        # Group chunks by document_id
        document_chunks = {}
        for doc in all_documents:
            # Safely extract document_id
            document_id = doc.get('metadata', {}).get('document_id',
                                                      doc.get('id',
                                                              str(uuid.uuid4())))

            # Ensure metadata is a dictionary
            metadata = doc.get('metadata', {}) if isinstance(
                doc.get('metadata'), dict) else {}

            # Create chunk entry
            chunk = {
                'text': str(doc.get('text', '')),
                'metadata': metadata
            }

            # Group chunks by document_id
            if document_id not in document_chunks:
                document_chunks[document_id] = []

            document_chunks[document_id].append(chunk)

        # Prepare response
        processed_documents = []
        for doc_id, chunks in document_chunks.items():
            processed_documents.append({
                "document_id": doc_id,
                "total_chunks": len(chunks),
                "chunks": chunks
            })

        return {
            "total_documents": len(processed_documents),
            "documents": processed_documents,
            "message": f"Successfully retrieved {len(processed_documents)} documents"
        }

    except Exception as e:
        # Log the full error for debugging
        logger.error(
            f"Error retrieving all document chunks: {str(e)}", exc_info=True)

        # Return a structured error response
        return {
            "total_documents": 0,
            "documents": [],
            "message": f"An error occurred while retrieving document chunks: {str(e)}"
        }


@app.get("/documentChunks/{document_id}")
async def get_document_chunks(document_id: str):
    """Get all chunks for a specific document"""
    try:
        vector_store, _ = await get_vector_store()
        chunks = vector_store.get_document_chunks(document_id)

        if not chunks:
            raise HTTPException(status_code=404, detail="Document not found")

        return {
            "document_id": document_id,
            "total_chunks": len(chunks),
            "chunks": chunks
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error retrieving document chunks: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.delete("/documents/{document_id}")
async def delete_document(document_id: str):
    """Delete document from MongoDB, ChromaDB, and physical storage"""
    try:
        # First get document details from MongoDB to get file path
        document = await mongodb.get_document(document_id)
        # if not document:
        #    raise HTTPException(status_code=404, detail="Document not found")

        # Get vector store instance
        vector_store, _ = await get_vector_store()

        # Delete physical file using document service
        deletion_success = await document_service.delete_document(document_id)
        if not deletion_success:
            logger.warning(
                f"Failed to delete physical file for document {document_id}")

        # Delete from vector store
        try:
            vector_store.delete_document(document_id)
        except Exception as e:
            logger.error(
                f"Error deleting document from vector store: {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"Failed to delete document from vector store: {str(e)}"
            )

        # Delete from MongoDB - don't check return value since document might already be deleted
        await mongodb.delete_document(document_id)

        return {
            "status": "success",
            "message": f"Document {document_id} successfully deleted from all stores"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in delete_document endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/processDriveDocuments")
async def process_drive_documents():
    try:
        # Initialize vector store
        vector_store, _ = await get_vector_store()

        # Initialize Drive document processor
        drive_processor = DriveDocumentProcessor(
            google_service_account_path=settings.GOOGLE_SERVICE_ACCOUNT_PATH,
            folder_id=settings.GOOGLE_DRIVE_FOLDER_ID,
            temp_dir=settings.TEMP_DOWNLOAD_DIR,
            doc_processor=doc_processor,
            mongodb=mongodb
        )

        # Process documents
        result = await drive_processor.process_documents(vector_store)
        return result

    except Exception as e:
        logger.error(f"Error in process_drive_documents: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=str(e)
        )


@app.post("/user/contact", response_model=ChatResponse)
async def create_user_contact(
    request: UserContactRequest,
    background_tasks: BackgroundTasks
):
    """Create or retrieve user conversation based on contact information"""
    try:
        # Check for existing user
        existing_conversation_id = await mongodb.find_existing_user(
            email=request.email,
            phone_number=request.phone_number
        )

        if existing_conversation_id:
            chat_request = ChatRequest(
                query=f'A returning user named "{request.full_name}" with email "{request.email}" and phone number "{request.phone_number}" wants support again. This is an introduction: create a welcome-back message for them and ask how you can help them today.',
                llm_provider="openai",
                max_context_docs=3,
                temperature=1.0,
                stream=False,
                conversation_id=existing_conversation_id
            )
        else:
            # Create new conversation with user information
            new_conversation_id = str(uuid.uuid4())
            await mongodb.create_conversation(
                conversation_id=new_conversation_id,
                full_name=request.full_name,
                email=request.email,
                phone_number=request.phone_number
            )

            chat_request = ChatRequest(
                query=f'A new user named "{request.full_name}" with email "{request.email}" and phone number "{request.phone_number}" wants support. This is an introduction: create a welcome message for them and ask how you can help them today.',
                llm_provider="openai",
                max_context_docs=3,
                temperature=1.0,
                stream=False,
                conversation_id=new_conversation_id
            )

        # Call chat_endpoint with the prepared request
        return await chat_endpoint(chat_request, background_tasks)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in create_user_contact: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(
    request: ChatRequest,
    background_tasks: BackgroundTasks
):
    """Chat endpoint with RAG support and enhanced Excel handling"""
    try:
        # Initialize core components
        logger.info(
            f"Initializing vector store and embedding: {str(datetime.now())}")
        vector_store, embedding_model = await get_vector_store()

        logger.info(f"Initializing LLM: {str(datetime.now())}")
        llm = get_llm_instance(request.llm_provider)

        # Initialize RAG agent
        # rag_agent = RAGAgent(
        #     llm=llm,
        #     embedding=embedding_model,
        #     vector_store=vector_store,
        #     mongodb=mongodb
        # )

        rag_agent = SystemInstructionsRAGAgent(
            llm=llm,
            embedding=embedding_model,
            vector_store=vector_store,
            mongodb=mongodb
        )

        # rag_agent.add_custom_role(
        #     "Knowledge based chatbot and introduction specialist",
        #     """You are a welcome agent with knowledge based specialist focusing on knowledge attached and create a beautiful welcome message.
        #     Your role is to:
        #     1. Your response should be short and to the point.
        #     2. Strictly follow this point for If it is an introduction. You strictly respond that "Welcome name of customer to our platform. How can I help you today?"
        #     """
        # )

        # rag_agent.add_custom_role(
        #     "Knowledge based chatbot",
        #     """You are a knowledge based specialist focusing on knowledge attached.
        #     Your role is to:
        #     1. Your response should be short and to the point.
        #     2. if it is not introduction then make sure to share the response from Vector store.
        #     3. If you do not find relevant information. Just say I do not have this information but this do not apply to introduction message.
        #     4. If there is an introduction, you should ignore above roles and connect with LLm to have a welcome message for the user.
        #     """
        # )

        # Use provided conversation ID or create new one
        conversation_id = request.conversation_id or str(uuid.uuid4())

        # Process the query
        query = request.query

        # Add specific instructions for certain types of queries
        # if "introduce" in query.lower() or "name" in query.lower() or "email" in query.lower():
        # query += ". The response should be short and to the point. Make sure to not add any irrelevant information. make sure to share the response from Vector store, if you do not find information in vector store. Just respond I do not have information. Keep the introduction concise and friendly."

        # Generate response
        logger.info(f"Generating response: {str(datetime.now())}")

        max_retries = 3
        retry_count = 0
        response = None
        last_error = None

        while retry_count < max_retries and response is None:
            try:
                response = await rag_agent.generate_response(
                    query=query,
                    conversation_id=conversation_id,
                    temperature=request.temperature,
                    max_tokens=request.max_tokens if hasattr(
                        request, 'max_tokens') else None
                )
                break
            except Exception as e:
                last_error = e
                retry_count += 1
                logger.warning(f"Attempt {retry_count} failed: {str(e)}")
                await asyncio.sleep(1)  # Brief pause before retry

        if response is None:
            raise last_error or Exception(
                "Failed to generate response after retries")

        logger.info(f"Response generated: {str(datetime.now())}")

        # Prepare response metadata
        metadata = {
            'llm_provider': request.llm_provider,
            'temperature': request.temperature,
            'conversation_id': conversation_id
        }

        # Add Excel-specific metadata if present
        has_excel_content = any(
            doc and 'Sheet:' in doc
            for doc in (response.context_docs or [])
        )
        if has_excel_content:
            try:
                metadata['excel_content'] = True

                # Extract Excel-specific insights if available
                if hasattr(rag_agent, 'get_excel_insights'):
                    excel_insights = rag_agent.get_excel_insights(
                        query=query,
                        context_docs=response.context_docs
                    )
                    if excel_insights:
                        metadata['excel_insights'] = excel_insights
            except Exception as e:
                logger.warning(f"Error processing Excel metadata: {str(e)}")

        # Store message in chat history
        await mongodb.store_message(
            conversation_id=conversation_id,
            query=request.query,
            response=response.response,
            context=response.context_docs,
            sources=response.sources,
            llm_provider=request.llm_provider
        )

        # Prepare and return response
        chat_response = ChatResponse(
            response=response.response,
            context=response.context_docs,
            sources=response.sources,
            conversation_id=conversation_id,
            timestamp=datetime.utcnow(),
            relevant_doc_scores=response.scores if hasattr(
                response, 'scores') else None,
            metadata=metadata
        )

        # Log completion
        logger.info(f"Chat response completed: {str(datetime.now())}")

        return chat_response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in chat endpoint: {str(e)}", exc_info=True)
        # Convert known errors to HTTPExceptions with appropriate status codes
        if isinstance(e, ValueError):
            raise HTTPException(status_code=400, detail=str(e))
        elif isinstance(e, (KeyError, AttributeError)):
            raise HTTPException(
                status_code=500, detail="Internal processing error")
        else:
            raise HTTPException(status_code=500, detail=str(e))


@app.get("/chat/history/{conversation_id}")
async def get_conversation_history(conversation_id: str):
    """Get complete conversation history"""
    history = await mongodb.get_conversation_history(conversation_id)

    if not history:
        raise HTTPException(status_code=404, detail="Conversation not found")

    return {
        "conversation_id": conversation_id,
        "messages": history
    }


@app.post("/chat/summarize", response_model=SummaryResponse)
async def summarize_conversation(request: SummarizeRequest):
    """Generate a summary of a conversation"""
    try:
        messages = await mongodb.get_messages_for_summary(request.conversation_id)

        if not messages:
            raise HTTPException(
                status_code=404, detail="Conversation not found")

        summary = await summarizer.summarize_conversation(
            messages,
            include_metadata=request.include_metadata
        )

        return SummaryResponse(**summary)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error generating summary: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/chat/feedback/{conversation_id}")
async def submit_feedback(
    conversation_id: str,
    feedback_request: FeedbackRequest
):
    """Submit feedback for a conversation"""
    try:
        # Validate conversation exists
        conversation = await mongodb.get_conversation_metadata(conversation_id)
        if not conversation:
            raise HTTPException(
                status_code=404, detail="Conversation not found")

        # Update feedback
        success = await mongodb.update_feedback(
            conversation_id=conversation_id,
            feedback=feedback_request.feedback,
            rating=feedback_request.rating
        )

        if not success:
            raise HTTPException(
                status_code=500,
                detail="Failed to update feedback"
            )

        return {
            "status": "success",
            "message": "Feedback submitted successfully",
            "data": {
                "conversation_id": conversation_id,
                "feedback": feedback_request.feedback,
                "rating": feedback_request.format_rating()
            }
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error submitting feedback: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/debug/config")
async def debug_config():
    """Debug endpoint to check configuration"""

    debug_info = {
        "environment_variables": {
            "OPENAI_API_KEY": "[SET]" if os.getenv('OPENAI_API_KEY') else "[NOT SET]",
            "OPENAI_MODEL": os.getenv('OPENAI_MODEL', '[NOT SET]')
        },
        "settings": {
            "OPENAI_API_KEY": "[SET]" if settings.OPENAI_API_KEY else "[NOT SET]",
            "OPENAI_MODEL": settings.OPENAI_MODEL,
        },
        "files": {
            "env_file_exists": Path('.env').exists(),
            "openai_config_exists": (Path.home() / '.openai' / 'api_key').exists()
        }
    }

    if settings.OPENAI_API_KEY:
        key = settings.OPENAI_API_KEY
        debug_info["api_key_info"] = {
            "length": len(key),
            "preview": f"{key[:4]}...{key[-4:]}" if len(key) > 8 else "[INVALID LENGTH]"
        }

    return debug_info


@app.post("/admin/reset-chroma")
async def reset_chroma_db(api_key: str = Depends(verify_api_key)):
    """
    Reset ChromaDB completely - use with caution

    This endpoint cleans up all resources and recreates ChromaDB
    """
    try:
        # Reset all vector store resources
        await cleanup_vectorstore()

        return {
            "status": "success",
            "message": "ChromaDB reset complete. You may need to restart the application for changes to take effect.",
            "details": {
                "chroma_path": settings.CHROMA_PATH
            }
        }

    except Exception as e:
        logger.error(f"Error resetting ChromaDB: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to reset ChromaDB: {str(e)}"
        )


@app.post("/admin/cleanup")
async def cleanup_databases(
    include_files: bool = True,
    api_key: str = Depends(verify_api_key)
):
    """
    Clean up all data from ChromaDB and MongoDB

    Args:
        include_files (bool): Whether to also delete uploaded files

    Returns:
        Dict: Cleanup operation summary with restart information
    """
    try:
        result = await perform_cleanup(mongodb, include_files)

        # If restart is needed, return 202 Accepted instead of 200 OK
        if result.get("restart_needed"):
            return JSONResponse(
                status_code=202,
                content={
                    **result,
                    "message": "Cleanup partially completed. Please restart the server to complete ChromaDB cleanup."
                }
            )

        return result

    except Exception as e:
        logger.error(f"Error in cleanup operation: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Error during cleanup: {str(e)}"
        )


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "healthy"}


if __name__ == "__main__":
    # Get port from environment variable or default to 8000
    port = int(os.getenv("PORT", 8000))

    # Run the application
    uvicorn.run(
        "src.main:app",
        host="0.0.0.0",
        port=port,
        reload=False  # Set to False for production
    )