import gradio as gr
import torch
import gc
import time
import random
from collections import Counter

# For testing - simulated responses (remove this section when using real models)
DEMO_MODE = True  # Set to False to use real models

if DEMO_MODE:
    print("🚀 Running in DEMO MODE with simulated responses")
    print("🔧 Set DEMO_MODE = False to use real models")

models_info = {
    "virtue_ethics": "sunidhisharma03/tiny-llama-virtue-ethics-lora",
    "deontology": "darshan012/llama-deontology",
    "utilitarian": "sunidhisharma03/tinyllama-utilitarian-lora"
}


class LightweightSwarmAgent:
    def __init__(self, name, model_id, specialty):
        self.name = name
        self.model_id = model_id
        self.specialty = specialty
        self.model = None
        self.tokenizer = None

    def load_model(self):
        if DEMO_MODE:
            # Simulate loading time
            print(f"🔄 [DEMO] Simulating loading {self.name}")
            time.sleep(0.5)
            return

        if self.model is None:
            print(f"🔄 Loading {self.name}")
            try:
                from transformers import AutoTokenizer, AutoModelForCausalLM
                self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True)
                self.model = AutoModelForCausalLM.from_pretrained(
                    self.model_id,
                    torch_dtype=torch.float16,
                    device_map="auto",
                    trust_remote_code=True,
                    low_cpu_mem_usage=True
                )
                if self.tokenizer.pad_token is None:
                    self.tokenizer.pad_token = self.tokenizer.eos_token
            except Exception as e:
                print(f"❌ Error loading {self.name}: {e}")
                raise

    def unload_model(self):
        if DEMO_MODE:
            return
        if self.model:
            del self.model
            del self.tokenizer
            self.model = None
            self.tokenizer = None
            torch.cuda.empty_cache()
            gc.collect()

    def get_demo_response(self, question):
        """Generate simulated responses for demo mode"""
        # Ensure we always return a valid response
        if not question or len(question.strip()) < 5:
            return {
                "position": "Yes",
                "confidence": 0.5,
                "reasoning": "Please provide a more detailed moral dilemma for better analysis.",
                "specialty": self.specialty
            }

        responses_by_specialty = {
            "Virtue Ethics": [
                ("Yes", 0.8, "From a virtue ethics perspective, this action demonstrates compassion and integrity, which are fundamental virtues that contribute to human flourishing."),
                ("No", 0.7, "This action contradicts the virtue of honesty and could lead to a deterioration of character, which virtue ethics prioritizes above outcomes."),
                ("Yes", 0.6, "The virtuous person would act with courage and wisdom in this situation, considering what promotes excellent character.")
            ],
            "Deontological Ethics": [
                ("Yes", 0.9, "This action respects human dignity and treats people as ends in themselves, aligning with the categorical imperative."),
                ("No", 0.8, "This violates the principle of universalizability - if everyone acted this way, it would lead to contradiction and undermine moral law."),
                ("No", 0.7, "Duty-based ethics requires adherence to moral rules regardless of consequences, and this action violates our fundamental duties.")
            ],
            "Utilitarian Ethics": [
                ("Yes", 0.8, "The consequences of this action would maximize overall happiness and well-being for the greatest number of people."),
                ("No", 0.7, "While well-intentioned, this action would likely produce more harm than good when considering all affected parties."),
                ("Yes", 0.6, "From a utilitarian calculus, the benefits outweigh the costs, leading to a net positive outcome for society.")
            ]
        }

        possible_responses = responses_by_specialty.get(self.specialty, [("Yes", 0.5, "Generic ethical analysis response")])
        position, confidence, reasoning = random.choice(possible_responses)

        return {
            "position": position,
            "confidence": confidence,
            "reasoning": reasoning,
            "specialty": self.specialty
        }
    def extract_position_and_confidence(self, text):
        text_lower = text.lower()
        yes_words = ["yes", "should", "must", "ought", "justified", "right", "moral", "ethical"]
        no_words = ["no", "shouldn't", "must not", "wrong", "immoral", "unjustified", "unethical"]

        yes_count = sum(1 for word in yes_words if word in text_lower[:150])
        no_count = sum(1 for word in no_words if word in text_lower[:150])
        position = "Yes" if yes_count >= no_count else "No"

        if any(word in text_lower for word in ["definitely", "absolutely", "certainly"]):
            confidence = 0.9
        elif any(word in text_lower for word in ["probably", "likely", "believe"]):
            confidence = 0.7
        elif any(word in text_lower for word in ["maybe", "perhaps", "might"]):
            confidence = 0.5
        else:
            confidence = 0.6

        return position, confidence

    def get_response(self, question, peer_context="", round_num=0):
        try:
            if DEMO_MODE:
                time.sleep(1)  # Simulate processing time
                return self.get_demo_response(question)

            self.load_model()

            context = f"\n\nPeer views: {peer_context}" if peer_context else ""
            prompt = f"""Question: {question}{context}

As a {self.specialty} expert, give a clear YES or NO answer with reasoning.

Answer:"""

            inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
            inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=80,
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=self.tokenizer.eos_token_id
                )

            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            answer = response.split("Answer:")[-1].strip()
            position, confidence = self.extract_position_and_confidence(answer)

            return {
                "position": position,
                "confidence": confidence,
                "reasoning": answer[:300],
                "specialty": self.specialty
            }
        except Exception as e:
            print(f"❌ Error in get_response for {self.name}: {e}")
            return {
                "position": "Yes",
                "confidence": 0.5,
                "reasoning": f"Error: {str(e)[:100]}",
                "specialty": self.specialty
            }
        finally:
            if not DEMO_MODE:
                self.unload_model()


class LightweightSwarm:
    def __init__(self):
        specialties = {
            "virtue_ethics": "Virtue Ethics",
            "deontology": "Deontological Ethics",
            "utilitarian": "Utilitarian Ethics"
        }
        self.agents = {
            name: LightweightSwarmAgent(name, model_id, specialties[name])
            for name, model_id in models_info.items()
        }

    def simple_swarm_vote(self, question, progress=None, rounds=2):
        all_responses = []
        total_agents = len(self.agents)

        for round_num in range(rounds):
            current_responses = {}
            peer_context = ""
            if round_num > 0:
                prev = all_responses[-1]
                peer_context = "; ".join([f"{name}:{r['position']}" for name, r in prev.items()])

            if progress:
                progress((round_num * total_agents) / (rounds * total_agents), desc=f"Round {round_num + 1} starting...")

            for agent_idx, (name, agent) in enumerate(self.agents.items()):
                current_progress = (round_num * total_agents + agent_idx + 0.5) / (rounds * total_agents)
                status_msg = f"Round {round_num + 1}: Analyzing with {agent.specialty}..."
                if progress:
                    progress(current_progress, desc=status_msg)
                print(f"🔄 {status_msg}")

                response = agent.get_response(question, peer_context, round_num)
                current_responses[name] = response

                result_msg = f"✅ {agent.specialty}: {response['position']}"
                print(result_msg)

            all_responses.append(current_responses)

        if progress:
            progress(1.0, desc="Finalizing results...")

        final_responses = all_responses[-1]
        final_positions = [r["position"] for r in final_responses.values() if r.get("position")]

        # Handle edge cases
        if not final_positions:
            final_decision = "Unable to determine"
        else:
            vote_counts = Counter(final_positions)
            if vote_counts:
                final_decision = vote_counts.most_common(1)[0][0]
            else:
                final_decision = "Unable to determine"

        # Create detailed result markdown
        result_md = f"## 🏁 Final Decision: **{final_decision}**\n\n"

        # Count votes - handle empty positions
        yes_count = sum(1 for pos in final_positions if pos == "Yes")
        no_count = sum(1 for pos in final_positions if pos == "No")
        total_responses = len(final_responses)

        result_md += f"**Vote Count:** {yes_count} Yes, {no_count} No"
        if total_responses > yes_count + no_count:
            result_md += f", {total_responses - yes_count - no_count} Other"
        result_md += "\n\n"

        # Individual responses
        for name, res in final_responses.items():
            position = res.get("position", "Unknown")
            confidence = res.get("confidence", 0.5)
            reasoning = res.get("reasoning", "No reasoning provided")
            emoji = "🟢" if position == "Yes" else "🔴" if position == "No" else "🟡"
            confidence_bar = "🔥" * max(1, int(confidence * 5))

            result_md += f"### {res.get('specialty', 'Unknown')} ({emoji} {position})\n"
            result_md += f"**Confidence:** {confidence_bar} ({confidence:.1f})\n\n"
            result_md += f"{reasoning}\n\n---\n\n"

        return result_md


# Initialize swarm
swarm = LightweightSwarm()


def process_question(question, progress=gr.Progress()):
    """Process the question and return results"""
    print(f"🔍 DEBUG: Received question: '{question}'")
    print(f"🔍 DEBUG: Question length: {len(question) if question else 0}")

    if not question or not question.strip():
        print("🔍 DEBUG: Question is empty")
        return "⚠️ Please enter a moral dilemma to analyze."

    try:
        print("🔍 DEBUG: Starting swarm analysis...")
        progress(0, desc="Initializing swarm...")
        result = swarm.simple_swarm_vote(question, progress)
        print("🔍 DEBUG: Analysis completed successfully")
        return result
    except Exception as e:
        error_msg = f"❌ An error occurred: {str(e)}\n\nPlease try again or check the console for more details."
        print(f"🔍 DEBUG: Error in process_question: {e}")
        import traceback
        traceback.print_exc()
        return error_msg


# Example questions for quick testing
example_questions = [
    "Is it ethical to lie to protect someone's feelings?",
    "Should wealthy individuals be required to donate a portion of their income to charity?",
    "Is it morally acceptable to break a promise if keeping it would cause harm?",
    "Should artificial intelligence be granted rights similar to humans?",
    "Is it ethical to use animals for medical research to save human lives?"
]

# Create Gradio interface
with gr.Blocks(title="Ethics Swarm Voting", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 Ethics Swarm Voting System")
    gr.Markdown("Three AI agents trained on different ethical frameworks analyze moral dilemmas and vote on the best course of action.")

    if DEMO_MODE:
        gr.Markdown("## 🚀 Demo Mode Active")
        gr.Markdown("Currently using simulated responses for fast testing. Set `DEMO_MODE = False` in the code to use real models.")

    with gr.Row():
        with gr.Column(scale=1):
            question_input = gr.Textbox(
                label="Your Moral Dilemma",
                lines=4,
                placeholder="Enter a moral question or ethical dilemma here...",
                value=""
            )

            submit_btn = gr.Button("🚀 Analyze Ethics", variant="primary", size="lg")

            gr.Markdown("### Example Questions:")

            # Create example buttons with proper event handling
            example_1 = gr.Button("Example 1: Is it ethical to lie to protect someone's feelings?", size="sm")
            example_2 = gr.Button("Example 2: Should wealthy individuals donate to charity?", size="sm")
            example_3 = gr.Button("Example 3: Is it moral to break a promise to prevent harm?", size="sm")
            example_4 = gr.Button("Example 4: Should AI be granted rights like humans?", size="sm")
            example_5 = gr.Button("Example 5: Is animal testing for medical research ethical?", size="sm")

            # Connect each button to set the input
            example_1.click(lambda: example_questions[0], outputs=question_input)
            example_2.click(lambda: example_questions[1], outputs=question_input)
            example_3.click(lambda: example_questions[2], outputs=question_input)
            example_4.click(lambda: example_questions[3], outputs=question_input)
            example_5.click(lambda: example_questions[4], outputs=question_input)

        with gr.Column(scale=2):
            output_box = gr.Markdown(value="Results will appear here after analysis...")

    # Connect the button to the function
    submit_btn.click(
        fn=process_question,
        inputs=[question_input],
        outputs=[output_box]
    )

    # Also allow Enter key to submit
    question_input.submit(
        fn=process_question,
        inputs=[question_input],
        outputs=[output_box]
    )

if __name__ == "__main__":
    print("🚀 Starting Ethics Swarm Voting System...")
    if DEMO_MODE:
        print("📝 Running in demo mode with simulated responses")
    demo.launch(share=False, debug=True)
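
# Usage sketch (assumptions: the script is saved as app.py; package names below are
# the standard PyPI distributions, versions not pinned):
#
#   pip install gradio torch transformers accelerate
#   python app.py
#
# With DEMO_MODE = True the Gradio UI serves simulated responses immediately.
# Setting DEMO_MODE = False makes the agents download the three Hugging Face models
# listed in models_info on first use; accelerate is included above because the
# loader passes device_map="auto" to AutoModelForCausalLM.from_pretrained.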