# import gradio as gr
# from huggingface_hub import InferenceClient

# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content

#         response += token
#         yield response


# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )


# if __name__ == "__main__":
#     demo.launch()

# import gradio as gr
# from huggingface_hub import InferenceClient

# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")

# def respond(message, history: list[tuple[str, str]]):
#     system_message = (
#         "You are a helpful and experienced coding assistant specialized in web development. "
#         "Help the user by generating complete and functional code for building websites. "
#         "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) based on their requirements. "
#         "Break down the tasks clearly if needed, and be friendly and supportive in your responses."
#     )
#     max_tokens = 2048
#     temperature = 0.7
#     top_p = 0.95

#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content or ""

#         response += token
#         yield response

# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(respond)

# if __name__ == "__main__":
#     demo.launch()

# import gradio as gr
# from huggingface_hub import InferenceClient

# # 1. Instantiate with named model param
# client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct")

# def respond(message, history: list[dict]):
#     system_message = (
#         "You are a helpful and experienced coding assistant specialized in web development. "
#         "Help the user by generating complete and functional code for building websites. "
#         "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
#         "based on their requirements."
#     )
#     max_tokens = 2048
#     temperature = 0.7
#     top_p = 0.95

#     # Build messages in OpenAI-compatible format
#     messages = [{"role": "system", "content": system_message}]
#     for turn in history:
#         messages.append({"role": turn["role"], "content": turn["content"]})
#     messages.append({"role": "user", "content": message})

#     response = ""
#     # 2. Use named parameters and alias if desired
#     for chunk in client.chat.completions.create(
#         model="Qwen/Qwen2.5-Coder-32B-Instruct",
#         messages=messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         # 3. Extract token content
#         token = chunk.choices[0].delta.content or ""
#         response += token
#         yield response

# # 4. Wire up Gradio chat interface
# demo = gr.ChatInterface(respond, type="messages")

# if __name__ == "__main__":
#     demo.launch()

# import os
# import gradio as gr
# from huggingface_hub import InferenceClient

# # Read the token from the environment rather than hardcoding it
# hf_token = os.getenv("HF_TOKEN")

# # Ensure token is available
# if hf_token is None:
#     raise ValueError("HF_TOKEN is not set in the environment.")

# # Instantiate Hugging Face Inference Client with token
# client = InferenceClient(
#     model="Qwen/Qwen2.5-Coder-32B-Instruct",
#     token=hf_token
# )

# def respond(message, history: list[dict]):
#     system_message = (
#         "You are a helpful and experienced coding assistant specialized in web development. "
#         "Help the user by generating complete and functional code for building websites. "
#         "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
#         "based on their requirements."
#     )
#     max_tokens = 2048
#     temperature = 0.7
#     top_p = 0.95

#     # Build conversation history
#     messages = [{"role": "system", "content": system_message}]
#     for turn in history:
#         messages.append({"role": turn["role"], "content": turn["content"]})
#     messages.append({"role": "user", "content": message})

#     response = ""
#     # Stream the response from the model
#     for chunk in client.chat.completions.create(
#         model="Qwen/Qwen2.5-Coder-32B-Instruct",
#         messages=messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content or ""
#         response += token
#         yield response

# # Gradio UI
# demo = gr.ChatInterface(respond, type="messages")

# if __name__ == "__main__":
#     demo.launch()

# import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM
# import torch

# # Load once globally
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct")
# model = AutoModelForCausalLM.from_pretrained(
#     "Qwen/Qwen2.5-Coder-32B-Instruct",
#     device_map="auto",
#     torch_dtype=torch.float16,
# )

# def respond(message, history):
#     system_prompt = (
#         "You are a helpful coding assistant specialized in web development. "
#         "Provide complete code snippets for HTML, CSS, JS, Flask, Node.js etc."
#     )
#     # Build input prompt including chat history
#     chat_history = ""
#     for turn in history:
#         speaker = "User" if turn["role"] == "user" else "Assistant"
#         chat_history += f"{speaker}: {turn['content']}\n"
#     prompt = f"{system_prompt}\n{chat_history}User: {message}\nAssistant:"

#     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#     outputs = model.generate(
#         **inputs,
#         max_new_tokens=512,
#         temperature=0.7,
#         do_sample=True,
#         top_p=0.95,
#         eos_token_id=tokenizer.eos_token_id,
#     )
#     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

#     # Extract only the new response part after the prompt
#     response = generated_text[len(prompt):].strip()

#     # ChatInterface manages history itself, so return only the reply text
#     return response

# demo = gr.ChatInterface(respond, type="messages")

# if __name__ == "__main__":
#     demo.launch()
# import os
# import gradio as gr
# from huggingface_hub import InferenceClient
# from dotenv import load_dotenv

# # Load .env variables (make sure to have HF_TOKEN in .env or set as env var)
# load_dotenv()
# HF_TOKEN = os.getenv("HF_TOKEN")  # or directly assign your token here as string

# # Initialize InferenceClient with Hugging Face API token
# client = InferenceClient(
#     model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
#     token=HF_TOKEN
# )

# def respond(message, history):
#     """
#     Chat response generator function streaming from Hugging Face Inference API.
#     """
#     system_message = (
#         "You are a helpful and experienced coding assistant specialized in web development. "
#         "Help the user by generating complete and functional code for building websites. "
#         "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
#         "based on their requirements."
#     )
#     max_tokens = 2048
#     temperature = 0.7
#     top_p = 0.95

#     # Prepare messages in OpenAI chat format
#     messages = [{"role": "system", "content": system_message}]
#     for user_msg, assistant_msg in history:
#         if user_msg:
#             messages.append({"role": "user", "content": user_msg})
#         if assistant_msg:
#             messages.append({"role": "assistant", "content": assistant_msg})
#     messages.append({"role": "user", "content": message})

#     response = ""
#     # Stream response tokens from Hugging Face Inference API
#     for chunk in client.chat.completions.create(
#         model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
#         messages=messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content or ""
#         response += token
#         yield response

# # Create Gradio chat interface
# demo = gr.ChatInterface(fn=respond, title="Website Building Assistant")

# if __name__ == "__main__":
#     demo.launch()
# import os
# import gradio as gr
# from huggingface_hub import InferenceClient
# from dotenv import load_dotenv

# # Load environment variables
# load_dotenv()
# HF_TOKEN = os.getenv("HF_TOKEN")  # Ensure this is set in .env

# # Initialize Hugging Face Inference Client
# client = InferenceClient(
#     model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
#     token=HF_TOKEN
# )

# # Define system instructions for the chatbot
# system_message = (
#     "You are a helpful and experienced coding assistant specialized in web development. "
#     "Help the user by generating complete and functional code for building websites. "
#     "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
#     "based on their requirements."
# )

# # Define the response generation function
# def respond(message, history):
#     max_tokens = 2048
#     temperature = 0.7
#     top_p = 0.95

#     # Convert chat history into OpenAI-style format
#     messages = [{"role": "system", "content": system_message}]
#     for item in history:
#         role = item["role"]
#         content = item["content"]
#         messages.append({"role": role, "content": content})
    
#     # Add the latest user message
#     messages.append({"role": "user", "content": message})

#     response = ""

#     # Streaming response from the Hugging Face Inference API
#     for chunk in client.chat.completions.create(
#         model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
#         messages=messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content
#         if token is not None:
#             response += token
#             yield response

# # Create Gradio Chat Interface
# demo = gr.ChatInterface(
#     fn=respond,
#     title="Website Building Assistant",
#     chatbot=gr.Chatbot(show_label=False),
#     type="openai",  # Use OpenAI-style message format
# )

# if __name__ == "__main__":
#     demo.launch()

# import os
# import gradio as gr
# from huggingface_hub import InferenceClient
# from dotenv import load_dotenv

# # Load environment variables
# load_dotenv()
# HF_TOKEN = os.getenv("HF_TOKEN")

# # Initialize Hugging Face Inference Client
# client = InferenceClient(
#     model="mistralai/Codestral-22B-v0.1",
#     token=HF_TOKEN
# )

# # System prompt for coding assistant
# system_message = (
#     "You are a helpful and experienced coding assistant specialized in web development. "
#     "Help the user by generating complete and functional code for building websites. "
#     "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
#     "based on their requirements."
# )

# # Streaming chatbot logic using chat.completions
# def respond(message, history):
#     # Prepare messages with system prompt
#     messages = [{"role": "system", "content": system_message}]
#     for msg in history:
#         messages.append(msg)
#     messages.append({"role": "user", "content": message})

#     # Stream response from the model
#     response = ""
#     for chunk in client.chat.completions.create(
#         model="mistralai/Codestral-22B-v0.1",
#         messages=messages,
#         max_tokens=1024,
#         temperature=0.7,
#         top_p=0.95,
#         stream=True,
#     ):
#         token = chunk.choices[0].delta.content or ""
#         response += token
#         yield response

# # Create Gradio interface
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(type='messages')  # Use modern message format
#     gr.ChatInterface(fn=respond, chatbot=chatbot, type="messages")  # Match format

# # Launch app
# if __name__ == "__main__":
#     demo.launch()


# app.py
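# Active version: a streaming web-development coding assistant backed by
# mistralai/Mistral-7B-Instruct-v0.3 via the Hugging Face Inference API,
# served through a Gradio ChatInterface. The commented-out blocks above are
# earlier iterations kept for reference.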

import os
import gradio as gr
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
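# HF_TOKEN should live in a local .env file or the process environment
# (for example, as a Space secret); it authenticates the InferenceClient below.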

# Initialize Hugging Face Inference Client
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    token=HF_TOKEN
)
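
# Note: InferenceClient also exposes an OpenAI-compatible chat API
# (client.chat.completions.create), so the message list built below uses the
# standard {"role": ..., "content": ...} format.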

# System prompt for coding assistant
system_message = (
    "You are a helpful and experienced coding assistant specialized in web development. "
    "Help the user by generating complete and functional code for building websites. "
    "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
    "based on their requirements."
)

# Streaming chatbot logic
def respond(message, history):
    # Prepare messages with system prompt
    messages = [{"role": "system", "content": system_message}]
    for msg in history:
        # Forward only the role/content fields expected by the chat API
        messages.append({"role": msg["role"], "content": msg["content"]})
    messages.append({"role": "user", "content": message})

    # Stream response from the model
    response = ""
    for chunk in client.chat.completions.create(
        model="mistralai/Mistral-7B-Instruct-v0.3",
        messages=messages,
        max_tokens=1024,
        temperature=0.7,
        top_p=0.95,
        stream=True,
    ):
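        # Each streamed chunk carries an incremental delta; accumulate it and
        # re-yield the running text so Gradio updates the reply in place.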
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response

# Create Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type='messages')  # Use modern message format
    gr.ChatInterface(fn=respond, chatbot=chatbot, type="messages")  # Match format

# Launch app
if __name__ == "__main__":
    demo.launch()
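
# To run locally (assuming only the dependencies imported above plus a valid HF_TOKEN):
#   pip install gradio huggingface_hub python-dotenv
#   python app.py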