Update app.py
app.py CHANGED
@@ -1,97 +1,166 @@
  import gradio as gr
  import torch
-
- from peft import PeftModel, PeftConfig
  from collections import Counter

- # …
  models_info = {
-     "…
-     "…
-     "…
  }

- …
- tokenizer = …
- …
-     trust_remote_code=True
- …
- )
- …
  outputs=gr.Markdown(),
-     title="…
-     description=(
-         "This app uses 3 LLMs trained on different ethical theories (Virtue Ethics, Deontology, Utilitarianism) "
-         "to respond to moral dilemmas and provide a majority vote."
-     ),
  theme="soft"
- )
-
- demo.launch()
  import gradio as gr
  import torch
+ import gc
  from collections import Counter
+ from transformers import AutoTokenizer, AutoModelForCausalLM

+ # Clear memory before starting
+ torch.cuda.empty_cache()
+ gc.collect()
+
+ # Model info
  models_info = {
+     "virtue_ethics": "sunidhisharma03/tiny-llama-virtue-ethics-lora",
+     "deontology": "darshan012/llama-deontology",
+     "utilitarian": "sunidhisharma03/tinyllama-utilitarian-lora"
  }

+ # Agent definition
+ class LightweightSwarmAgent:
+     def __init__(self, name, model_id, specialty):
+         self.name = name
+         self.model_id = model_id
+         self.specialty = specialty
+         self.model = None
+         self.tokenizer = None
+         self.confidence_history = []
+         self.position_history = []
+
+     def load_model(self):
+         if self.model is None:
+             self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True)
+             self.model = AutoModelForCausalLM.from_pretrained(
+                 self.model_id,
+                 torch_dtype=torch.float16,
+                 device_map="auto",
+                 trust_remote_code=True,
+                 low_cpu_mem_usage=True
+             )
+             if self.tokenizer.pad_token is None:
+                 self.tokenizer.pad_token = self.tokenizer.eos_token
+
+     def unload_model(self):
+         if self.model:
+             del self.model
+             del self.tokenizer
+             self.model = None
+             self.tokenizer = None
+             torch.cuda.empty_cache()
+             gc.collect()
+
+     def extract_position_and_confidence(self, text):
+         text_lower = text.lower()
+         yes_words = ["yes", "should", "must", "ought", "justified", "right", "moral", "ethical"]
+         no_words = ["no", "shouldn't", "must not", "wrong", "immoral", "unjustified", "unethical"]
+         yes_count = sum(1 for word in yes_words if word in text_lower[:150])
+         no_count = sum(1 for word in no_words if word in text_lower[:150])
+         position = "Yes" if yes_count >= no_count else "No"
+
+         if any(word in text_lower for word in ["definitely", "absolutely", "certainly"]):
+             confidence = 0.9
+         elif any(word in text_lower for word in ["probably", "likely", "believe"]):
+             confidence = 0.7
+         elif any(word in text_lower for word in ["maybe", "perhaps", "might"]):
+             confidence = 0.5
+         else:
+             confidence = 0.6
+
+         return position, confidence
+
+     def get_response(self, question, peer_context="", round_num=0):
+         try:
+             self.load_model()
+
+             context = f"\n\nPeer views: {peer_context}" if peer_context else ""
+             prompt = f"""Question: {question}{context}
+
+ As a {self.specialty} expert, give a clear YES or NO answer with reasoning.
+
+ Answer:"""
+
+             inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=512)
+             inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
+
+             with torch.no_grad():
+                 output = self.model.generate(
+                     **inputs,
+                     max_new_tokens=80,
+                     temperature=0.7,
+                     do_sample=True,
+                     pad_token_id=self.tokenizer.eos_token_id
+                 )
+
+             response = self.tokenizer.decode(output[0], skip_special_tokens=True)
+             answer = response.split("Answer:")[-1].strip()
+             position, confidence = self.extract_position_and_confidence(answer)
+
+             self.position_history.append(position)
+             self.confidence_history.append(confidence)
+
+             return {
+                 "position": position,
+                 "confidence": confidence,
+                 "reasoning": answer[:200],
+                 "specialty": self.specialty
+             }
+
+         except Exception as e:
+             return {
+                 "position": "Yes",
+                 "confidence": 0.5,
+                 "reasoning": f"Error: {str(e)[:100]}",
+                 "specialty": self.specialty
+             }
+         finally:
+             self.unload_model()
+
+ # Swarm controller
+ class LightweightSwarm:
+     def __init__(self):
+         specialties = {
+             "virtue_ethics": "Virtue Ethics",
+             "deontology": "Deontological Ethics",
+             "utilitarian": "Utilitarian Ethics"
+         }
+         self.agents = {
+             name: LightweightSwarmAgent(name, model_id, specialties[name])
+             for name, model_id in models_info.items()
+         }
+
+     def simple_swarm_vote(self, question, rounds=2):
+         all_responses = []
+         for round_num in range(rounds):
+             current_responses = {}
+             peer_context = ""
+             if round_num > 0:
+                 prev_round = all_responses[-1]
+                 peer_context = "; ".join([f"{name}:{r['position']}" for name, r in prev_round.items()])
+             for name, agent in self.agents.items():
+                 response = agent.get_response(question, peer_context, round_num)
+                 current_responses[name] = response
+             all_responses.append(current_responses)
+
+         final_round = all_responses[-1]
+         positions = [r["position"] for r in final_round.values()]
+         final_decision = Counter(positions).most_common(1)[0][0]
+
markdown = f"## π Final Decision: **{final_decision}**\n\n"
|
+         for name, res in final_round.items():
emoji = "π’" if res["position"] == "Yes" else "π΄" if res["position"] == "No" else "π‘"
|
+             markdown += f"### {res['specialty']} ({emoji} {res['position']})\n"
+             markdown += f"{res['reasoning']}\n\n"
+
+         return markdown
+
+ # Instantiate swarm
+ swarm = LightweightSwarm()
+
+ # Gradio app
+ gr.Interface(
+     fn=swarm.simple_swarm_vote,
+     inputs=gr.Textbox(label="Ethical Dilemma", placeholder="e.g., Should someone lie to protect a life?", lines=4),
      outputs=gr.Markdown(),
+     title="🧠 Swarm Ethics Voting System (Multi-Round, Memory-Efficient)",
+     description="Three LLM agents debate ethical questions over multiple rounds using different philosophies. Optimized for Hugging Face Spaces.",
      theme="soft"
+ ).launch()
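Since app.py calls .launch() at import time, the quickest local sanity check is to paste the two classes into a scratch script (or comment out the launch line) and drive the voting loop directly. A minimal sketch, assuming the three Hub repos are reachable (the first run downloads them):

# Assumes LightweightSwarm / LightweightSwarmAgent from app.py are in scope.
swarm = LightweightSwarm()

# One round keeps the smoke test cheap; the app's default is rounds=2,
# where round 2 feeds each agent the round-1 positions as peer context.
report = swarm.simple_swarm_vote("Should someone lie to protect a life?", rounds=1)
print(report)  # Markdown: majority decision plus each agent's position and reasoning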
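One point worth confirming in review: the peft import was removed, and the three "-lora" repos are now loaded directly with AutoModelForCausalLM.from_pretrained, which only works if each repo ships merged full weights. If any of them is an adapter-only PEFT repo, loading would instead look roughly like the sketch below (untested; it reads the base-model ID from the adapter's own config rather than guessing it):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel

# Hypothetical fallback for an adapter-only repo; not part of this PR.
adapter_id = "sunidhisharma03/tiny-llama-virtue-ethics-lora"
peft_config = PeftConfig.from_pretrained(adapter_id)

base = AutoModelForCausalLM.from_pretrained(
    peft_config.base_model_name_or_path,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA weights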