import sys
import json
import os
import re
import traceback

import torch
import librosa
import gradio as gr
from transformers import pipeline
from hugchat import hugchat
from hugchat.login import Login

# Directory to store/load cookies
cookie_path_dir = "./cookies/"
cookie_file_path = os.path.join(cookie_path_dir, "cookies_snapshot.json")  # Pre-saved cookie snapshot (a JSON dict of Hugging Face cookies)

# Load pre-saved cookies and initialize the HuggingChat client
try:
    print("Attempting to load cookies from:", cookie_file_path)
    if not os.path.exists(cookie_file_path):
        # If cookies don't exist, attempt to generate them (for local testing; remove in Spaces)
        EMAIL = os.environ.get("EMAIL", "fearfreed441@gmail.com")  # Fallback for local testing
        PASSWD = os.environ.get("PASSWORD", "e.AKsv$3Q4i4KcX")  # Fallback for local testing
        os.makedirs(cookie_path_dir, exist_ok=True)
        sign = Login(EMAIL, PASSWD)
        # login() returns a cookie jar; convert it to a plain dict for ChatBot
        cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True).get_dict()
        print("Generated new cookies since none were found.")
    else:
        # Load existing cookies
        with open(cookie_file_path, "r") as f:
            cookies = json.load(f)  # Load the cookie dictionary
        print("Cookies loaded from file.")
    chatbot = hugchat.ChatBot(cookies=cookies)  # Pass the cookie dict directly
    print("ChatBot initialized successfully.")
except Exception as e:
    print(f"Failed to initialize ChatBot: {e}")
    traceback.print_exc()
    sys.exit(1)

# Model and device configuration for Whisper transcription
MODEL_NAME = "openai/whisper-large-v3-turbo"
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)


def transcribe_audio(audio_path):
    """Transcribe an Urdu audio file to text with Whisper."""
    try:
        audio, sr = librosa.load(audio_path, sr=16000, mono=True)
        transcription = pipe(audio, batch_size=8, generate_kwargs={"language": "urdu"})["text"]
        return transcription
    except Exception as e:
        return f"Error processing audio: {e}"


def extract_metadata(file_name):
    """Parse the agent username and location from a file name like agent2_5_Multan_Pakistan.mp3."""
    base = file_name.split(".")[0]
    parts = base.split("_")
    if len(parts) >= 3:
        return {
            "agent_username": parts[0],
            "location": parts[-2],
        }
    return {"agent_username": "Unknown", "location": "Unknown"}


def process_audio(audio, file_name):
    """Transcribe, translate, extract crops/diseases, and fetch a temperature estimate."""
    urdu_text = transcribe_audio(audio)
    if "Error" in urdu_text:
        return json.dumps({"error": urdu_text})

    metadata = extract_metadata(file_name)
    location = metadata["location"]

    # Translate the (possibly noisy) Urdu transcription into English
    english_text = chatbot.chat(
        f"The following Urdu text is about crops and their diseases, but it may contain errors or "
        f"misheard words due to audio transcription issues. Please use context to infer the most "
        f"likely correct crop names and disease terms, and then translate the text to English:\n\n{urdu_text}"
    ).wait_until_done()

    # Ask for a structured crop/disease list that the parser below can consume
    extraction_prompt = f"""
Below is an English text about specific crops and possible diseases/pests:

{english_text}

Identify each specific Crop (like wheat, rice, cotton, etc.) mentioned and list any Diseases or Pests affecting that crop.
- If a disease or pest is mentioned without specifying a particular crop, list it under "No crop:".
- If a crop is mentioned but no diseases or pests are specified for it, include it with an empty diseases list.
- Do not include general terms like "crops" as a specific crop name.

Format your answer in this style (one entry at a time):

For specific crops with diseases:
1. CropName:
Diseases:
- DiseaseName
- AnotherDisease

For specific crops with no diseases:
2. NextCrop:
Diseases:

For standalone diseases:
3. No crop:
Diseases:
- StandaloneDisease

No extra text, just the structured bullet list.
"""
    extraction_response = chatbot.chat(extraction_prompt).wait_until_done()

    # Parse the numbered list into [{"crop": ..., "diseases": [...]}] entries
    lines = extraction_response.splitlines()
    crops_and_diseases = []
    current_crop = None
    current_diseases = []

    for line in lines:
        line = line.strip()
        if not line:
            continue

        # A new entry looks like "1. Wheat:" (or "3. No crop:")
        match_crop = re.match(r'^(\d+)\.\s*(.+?):$', line)
        if match_crop:
            if current_crop is not None or current_diseases:
                crops_and_diseases.append({
                    "crop": current_crop,
                    "diseases": current_diseases,
                })
            crop_name = match_crop.group(2).strip()
            if crop_name.lower() in ["no crop", "crops", "general crops"]:
                current_crop = None
            else:
                current_crop = crop_name
            current_diseases = []
            continue

        if line.lower().startswith("diseases:"):
            continue

        # Bullet lines ("- Rust") are diseases for the current entry
        if line.startswith('-'):
            disease_name = line.lstrip('-').strip()
            if disease_name:
                current_diseases.append(disease_name)

    # Flush the last entry
    if current_crop is not None or current_diseases:
        crops_and_diseases.append({
            "crop": current_crop,
            "diseases": current_diseases,
        })

    # Ask for a temperature estimate for the location and pull out the first number
    temp_prompt = f"Give me the weather of {location} in Celsius, numeric only."
    temperature_response = chatbot.chat(temp_prompt).wait_until_done()

    temperature = None
    temp_match = re.search(r'(\d+)', temperature_response)
    if temp_match:
        temperature = int(temp_match.group(1))

    output = {
        "urdu_text": urdu_text,
        "english_text": english_text,
        "crops_and_diseases": crops_and_diseases,
        "temperature": temperature,
        "location": location,
    }
    return json.dumps(output)


with gr.Blocks(title="Audio to Crop Disease API") as interface:
    gr.Markdown("## Upload Audio to Get Urdu Transcription, English Translation, and Crop Diseases")

    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="Upload Audio File (Urdu)")
        file_name_input = gr.Textbox(
            label="File Name for Metadata (e.g., agent2_5_Multan_Pakistan.mp3)",
            placeholder="Enter file name",
        )

    with gr.Row():
        output_json = gr.JSON(label="Output (Urdu, English, Crops with Diseases)")

    process_button = gr.Button("Process Audio")
    process_button.click(
        fn=process_audio,
        inputs=[audio_input, file_name_input],
        outputs=[output_json],
    )

if __name__ == "__main__":
    interface.launch()