import gradio as gr
from huggingface_hub import InferenceClient
import os

ACCESS_TOKEN = os.getenv("HF_TOKEN")
if ACCESS_TOKEN:
    print("Access token loaded.")
else:
    print("Warning: HF_TOKEN is not set; requests may fail or be rate-limited.")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    provider,
    custom_model,
    model_search_term,
    selected_model
):
    print(f"Received message: {message}")
    print(f"History: {history}")
    print(f"System message: {system_message}")
    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
    print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
    print(f"Selected provider: {provider}")
    print(f"Selected model (custom_model): {custom_model}")
    print(f"Model search term: {model_search_term}")
    print(f"Selected model from radio: {selected_model}")

    # Initialize the Inference Client with the provider.
    # The provider is specified during initialization, not in the method call.
    client = InferenceClient(token=ACCESS_TOKEN, provider=provider)
    print(f"Hugging Face Inference Client initialized with {provider} provider.")

    # Convert seed to None if -1 (meaning random)
    if seed == -1:
        seed = None

    # Prepare messages in the format expected by the API
    messages = [{"role": "system", "content": system_message}]
    print("Initial messages array constructed.")

    # Add conversation history to the context
    for user_part, assistant_part in history:
        if user_part:
            messages.append({"role": "user", "content": user_part})
            print(f"Added user message to context: {user_part}")
        if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
            print(f"Added assistant message to context: {assistant_part}")

    # Append the latest user message
    messages.append({"role": "user", "content": message})
    print("Latest user message appended.")

    # Determine which model to use, prioritizing custom_model if provided
    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
    print(f"Model selected for inference: {model_to_use}")

    # Start with an empty string to build the response as tokens stream in
    response = ""
    print(f"Sending request to {provider} provider.")

    # Prepare parameters for the chat completion request
    parameters = {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
    }

    if seed is not None:
        parameters["seed"] = seed

    # Use the InferenceClient to make the request
    try:
        # Create a generator for the streaming response.
        # The provider is already set when initializing the client.
        stream = client.chat_completion(
            model=model_to_use,
            messages=messages,
            stream=True,
            **parameters  # Pass all other parameters
        )

        # Process the streaming response
        for chunk in stream:
            if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                # Extract the content from the response delta
                if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                    token_text = chunk.choices[0].delta.content
                    if token_text:
                        print(f"Received token: {token_text}")
                        response += token_text
                        yield response
    except Exception as e:
        print(f"Error during inference: {e}")
        response += f"\nError: {str(e)}"
        yield response

    print("Completed response generation.")
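
# --- Optional smoke test, not used by the UI ---
# A minimal sketch of driving the respond() generator directly, assuming
# HF_TOKEN is set and the default "hf-inference" provider can serve the chosen
# model. The function name and all argument values here are illustrative.
def _smoke_test_respond():
    final = ""
    for partial in respond(
        message="Say hello in one sentence.",
        history=[],
        system_message="You are a helpful assistant.",
        max_tokens=64,
        temperature=0.7,
        top_p=0.95,
        frequency_penalty=0.0,
        seed=-1,
        provider="hf-inference",
        custom_model="",
        model_search_term="",
        selected_model="meta-llama/Llama-3.3-70B-Instruct",
    ):
        final = partial  # each yield is the accumulated response so far
    print(f"Smoke test response: {final}")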

# GRADIO UI
chatbot = gr.Chatbot(
    height=600,
    show_copy_button=True,
    placeholder="Select a model and begin chatting",
    layout="panel"
)
print("Chatbot interface created.")

# Basic input components
system_message_box = gr.Textbox(
    value="",
    placeholder="You are a helpful assistant.",
    label="System Prompt"
)

max_tokens_slider = gr.Slider(
    minimum=1,
    maximum=4096,
    value=512,
    step=1,
    label="Max tokens"
)
temperature_slider = gr.Slider(
    minimum=0.1,
    maximum=4.0,
    value=0.7,
    step=0.1,
    label="Temperature"
)
top_p_slider = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-P"
)
frequency_penalty_slider = gr.Slider(
    minimum=-2.0,
    maximum=2.0,
    value=0.0,
    step=0.1,
    label="Frequency Penalty"
)
seed_slider = gr.Slider(
    minimum=-1,
    maximum=65535,
    value=-1,
    step=1,
    label="Seed (-1 for random)"
)

# Provider selection
providers_list = [
    "hf-inference",  # Default Hugging Face Inference
    "cerebras",      # Cerebras
    "together",      # Together AI
    "sambanova",     # SambaNova
    "novita",        # Novita AI
    "cohere",        # Cohere
    "fireworks-ai",  # Fireworks AI
    "hyperbolic",    # Hyperbolic
    "nebius",        # Nebius
]

provider_radio = gr.Radio(
    choices=providers_list,
    value="hf-inference",
    label="Inference Provider",
    info="[View all models here](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending)"
)

# Custom model box
custom_model_box = gr.Textbox(
    value="",
    label="Custom Model",
    info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
    placeholder="meta-llama/Llama-3.3-70B-Instruct"
)

# Model selection components
model_search_box = gr.Textbox(
    label="Filter Models",
    placeholder="Search for a featured model...",
    lines=1
)

models_list = [
    "meta-llama/Llama-3.3-70B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "NousResearch/Hermes-3-Llama-3.1-8B",
    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mistral-7B-Instruct-v0.2",
    "Qwen/Qwen3-235B-A22B",
    "Qwen/Qwen3-32B",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-3B-Instruct",
    "Qwen/Qwen2.5-0.5B-Instruct",
    "Qwen/QwQ-32B",
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "microsoft/Phi-3.5-mini-instruct",
    "microsoft/Phi-3-mini-128k-instruct",
    "microsoft/Phi-3-mini-4k-instruct",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceTB/SmolLM2-360M-Instruct",
    "tiiuae/falcon-7b-instruct",
    "01-ai/Yi-1.5-34B-Chat",
]

featured_model_radio = gr.Radio(
    label="Select a model below",
    choices=models_list,
    value="meta-llama/Llama-3.3-70B-Instruct",
    interactive=True
)


def filter_models(search_term):
    # Case-insensitive substring match, e.g. "coder" keeps
    # "Qwen/Qwen2.5-Coder-32B-Instruct"; an empty term keeps the full list.
    print(f"Filtering models with search term: {search_term}")
    filtered = [m for m in models_list if search_term.lower() in m.lower()]
    print(f"Filtered models: {filtered}")
    return gr.update(choices=filtered)


def set_custom_model_from_radio(selected):
    """
    Triggered whenever someone picks a model from the 'Featured Models' radio.
    Updates the Custom Model text box with that selection automatically.
    """
    print(f"Featured model selected: {selected}")
    return selected


# Create the Gradio interface
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        system_message_box,
        max_tokens_slider,
        temperature_slider,
        top_p_slider,
        frequency_penalty_slider,
        seed_slider,
        provider_radio,        # Provider selection
        custom_model_box,      # Custom model override
        model_search_box,      # Model search box
        featured_model_radio   # Featured model radio
    ],
    fill_height=True,
    chatbot=chatbot,
    theme="Nymbo/Nymbo_Theme",
)
print("ChatInterface object created.")

with demo:
    # Connect the model filter to update the radio choices
    model_search_box.change(
        fn=filter_models,
        inputs=model_search_box,
        outputs=featured_model_radio
    )
    print("Model search box change event linked.")

    # Connect the featured model radio to update the custom model box
    featured_model_radio.change(
        fn=set_custom_model_from_radio,
        inputs=featured_model_radio,
        outputs=custom_model_box
    )
    print("Featured model radio button change event linked.")

print("Gradio interface initialized.")

if __name__ == "__main__":
    print("Launching the demo application.")
    demo.launch(show_api=True)