import gradio as gr
import requests
import json
import base64
import time

def encode_image(image):
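    """Read an image from a local file path (or a Gradio file dict) and return
    its contents base64-encoded for embedding in a data URL."""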
    if isinstance(image, dict) and 'path' in image:
        image_path = image['path']
    elif isinstance(image, str):
        image_path = image
    else:
        raise ValueError("Unsupported image format")
    
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def bot_streaming(message, history, api_key, model, system_prompt, temperature,
                  max_tokens, top_p, top_k, frequency_penalty, presence_penalty,
                  repetition_penalty, stop, min_p, top_a, seed, logit_bias,
                  logprobs, top_logprobs, response_format, tools, tool_choice):
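    """Stream a chat completion from the OpenRouter API.

    Builds an OpenAI-style message list from the Gradio chat history, posts it
    to the /chat/completions endpoint with streaming enabled, and yields the
    accumulated assistant reply so the UI updates incrementally.
    """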
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
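    # OpenRouter also accepts optional "HTTP-Referer" and "X-Title" headers for
    # app attribution; they are not set here.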

    messages = []

    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})

    # Rebuild prior turns from Gradio's tuple-style history. Image turns are
    # stored as (filepath,) or (filepath, caption) tuples in the user slot.
    for user_msg, assistant_msg in history:
        if isinstance(user_msg, tuple):
            image = user_msg[0]
            text = user_msg[1] if len(user_msg) > 1 and user_msg[1] else ""
            base64_image = encode_image(image)
            messages.append({
                "role": "user",
                "content": [
                    {"type": "text", "text": text},
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
                ]
            })
        else:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # The current turn: the multimodal ChatInterface passes a dict with "text"
    # and an optional list of uploaded "files".
    if isinstance(message, dict) and message.get("files"):
        image = message["files"][0]
        base64_image = encode_image(image)
        content = [
            {"type": "text", "text": message["text"]},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
        ]
    else:
        content = message["text"] if isinstance(message, dict) else message

    messages.append({"role": "user", "content": content})

    def parse_json_field(value):
        # Optional JSON parameters arrive as raw strings from the UI;
        # parse them and fall back to None when empty or invalid.
        if not value:
            return None
        try:
            return json.loads(value)
        except json.JSONDecodeError:
            return None

    data = {
        "model": model,
        "messages": messages,
        "stream": True,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "top_k": top_k,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty,
        "repetition_penalty": repetition_penalty,
        "stop": stop if stop else None,
        "min_p": min_p,
        "top_a": top_a,
        "seed": int(seed) if seed else None,
        "logit_bias": parse_json_field(logit_bias),
        "logprobs": logprobs if logprobs else None,
        "top_logprobs": int(top_logprobs) if top_logprobs else None,
        "response_format": parse_json_field(response_format),
        "tools": parse_json_field(tools),
        "tool_choice": tool_choice if tool_choice else None
    }
    # Drop unset optional parameters so only explicit values reach the API.
    data = {k: v for k, v in data.items() if v is not None}

    response = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers=headers,
        json=data,
        stream=True
    )

    # Surface API errors instead of silently producing an empty reply.
    if response.status_code != 200:
        yield f"OpenRouter API error {response.status_code}: {response.text}"
        return

    buffer = ""
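    # OpenRouter streams OpenAI-style server-sent events: each payload line is
    # prefixed with "data: " and the stream terminates with "data: [DONE]".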
    for chunk in response.iter_lines():
        if chunk:
            chunk = chunk.decode('utf-8')
            if chunk.startswith("data: "):
                chunk = chunk[6:]
                if chunk.strip() == "[DONE]":
                    break
                try:
                    chunk_data = json.loads(chunk)
                    if 'choices' in chunk_data and len(chunk_data['choices']) > 0:
                        delta = chunk_data['choices'][0].get('delta', {})
                        if 'content' in delta:
                            buffer += delta['content']
                            yield buffer
                            time.sleep(0.01)
                except json.JSONDecodeError:
                    continue

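# Gradio UI: parameter controls in the left column, chat interface on the right.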
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🤖 OpenRouter API Multimodal Chat

    Chat with various AI models using the OpenRouter API. Supports text and image interactions.

    ## 🚀 Quick Start:
    1. Enter your OpenRouter API key
    2. Choose a model from the list or enter any OpenRouter model ID
    3. Start chatting!

    Enjoy your AI-powered conversation!
    """)

    with gr.Row():
        with gr.Column(scale=1):
            api_key = gr.Textbox(label="API Key", type="password", placeholder="Enter your OpenRouter API key")
            model = gr.Dropdown(
                label="Select Model",
                choices=[
                    "google/gemini-flash-1.5",
                    "openai/gpt-4o-mini",
                    "anthropic/claude-3.5-sonnet:beta",
                    "gryphe/mythomax-l2-13b",
                    "meta-llama/llama-3.1-70b-instruct",
                    "microsoft/wizardlm-2-8x22b",
                    "nousresearch/hermes-3-llama-3.1-405b",
                    "mistralai/mistral-nemo",
                    "meta-llama/llama-3.1-8b-instruct",
                    "deepseek/deepseek-chat",
                    "mistralai/mistral-tiny",
                    "openai/gpt-4o",
                    "mistralai/mistral-7b-instruct",
                    "meta-llama/llama-3-70b-instruct",
                    "microsoft/wizardlm-2-7b"
                ],
                value="google/gemini-flash-1.5",
                allow_custom_value=True
            )
            system_prompt = gr.Textbox(label="System Prompt", placeholder="Enter a system prompt (optional)")
            
            with gr.Accordion("Common Settings", open=False):
                temperature = gr.Slider(minimum=0, maximum=2, value=1, step=0.1, label="Temperature")
                max_tokens = gr.Slider(minimum=1, maximum=4096, value=1000, step=1, label="Max Tokens")
                top_p = gr.Slider(minimum=0, maximum=1, value=1, step=0.01, label="Top P")
                frequency_penalty = gr.Slider(minimum=-2, maximum=2, value=0, step=0.1, label="Frequency Penalty")
            
            with gr.Accordion("Advanced Settings", open=False):
                presence_penalty = gr.Slider(minimum=-2, maximum=2, value=0, step=0.1, label="Presence Penalty")
                stop = gr.Textbox(label="Stop Sequence")
                top_k = gr.Slider(minimum=0, maximum=100, value=0, step=1, label="Top K")
                repetition_penalty = gr.Slider(minimum=0, maximum=2, value=1, step=0.1, label="Repetition Penalty")
                min_p = gr.Slider(minimum=0, maximum=1, value=0, step=0.01, label="Min P")
            
            with gr.Accordion("Expert Settings", open=False):
                top_a = gr.Slider(minimum=0, maximum=1, value=0, step=0.01, label="Top A")
                seed = gr.Number(label="Seed", precision=0)
                logit_bias = gr.Textbox(label="Logit Bias (JSON)", placeholder='e.g. {"50256": -10}')
                logprobs = gr.Checkbox(label="Log Probabilities")
                top_logprobs = gr.Slider(minimum=0, maximum=20, value=0, step=1, label="Top Log Probabilities")
                response_format = gr.Textbox(label="Response Format (JSON)", placeholder='e.g. {"type": "json_object"}')
                tools = gr.Textbox(label="Tools (JSON Array)", placeholder='e.g. [{"type": "function", "function": {...}}]')
                tool_choice = gr.Textbox(label="Tool Choice")

        with gr.Column(scale=2):
            chatbot = gr.ChatInterface(
                fn=bot_streaming,
                additional_inputs=[
                    api_key, model, system_prompt, temperature, max_tokens, top_p, top_k,
                    frequency_penalty, presence_penalty, repetition_penalty, stop,
                    min_p, top_a, seed, logit_bias, logprobs, top_logprobs,
                    response_format, tools, tool_choice
                ],
                title="💬 Chat with AI",
                description="Upload images or type your message to start the conversation.",
                retry_btn="🔄 Retry",
                undo_btn="↩️ Undo",
                clear_btn="🗑️ Clear",
                multimodal=True,
                cache_examples=False,
                fill_height=True,
            )

    gr.Markdown("""
    ## 🔧 Settings:
    - Adjust basic parameters in the "Common Settings" section
    - Fine-tune options in the "Advanced Settings" section
    - Access expert-level controls in the "Expert Settings" section
    - Upload images for multimodal interactions
    """)

demo.launch(debug=True, share=True)