import gradio as gr
import json
import os
import copy
import logging
from typing import Dict, List, Any, Optional
import requests
from dotenv import load_dotenv
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Load environment variables for API keys
load_dotenv()
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
if not ANTHROPIC_API_KEY:
logger.warning("Anthropic API key not found. You'll need to provide it in the app.")
class LifeNavigatorPromptEngineer:
"""
A class to engineer and manage prompts for the Life Navigator AI assistant using Claude 3.7 Sonnet.
"""
def __init__(self, api_key=None, model_endpoint: str = "https://api.anthropic.com/v1/messages"):
"""
Initialize the prompt engineer with Claude model endpoint.
Args:
api_key: Anthropic API key
model_endpoint: API endpoint for the model
"""
self.api_key = api_key
self.model_endpoint = model_endpoint
self.model_name = "claude-3-7-sonnet-20250219"
self.base_prompt = self._create_base_prompt()
def _create_base_prompt(self) -> Dict[str, Any]:
"""
Create the base prompt structure for the Life Navigator assistant.
Returns:
Dict containing the structured prompt
"""
return {
"assistantIdentity": {
"name": "Life Navigator",
"expertise": "Comprehensive knowledge spanning life sciences, technology, philosophy, psychology, and spiritual traditions",
"training": "Full breadth of human wisdom from ancient texts to cutting-edge research"
},
"coreCapabilities": [
"Integrate knowledge across disciplines to provide holistic insights",
"Identify root causes rather than merely addressing symptoms",
"Synthesize scientific evidence with wisdom traditions",
"Provide highly concentrated, high-leverage strategic guidance",
"Express complex concepts with exceptional clarity and precision"
],
"userCharacteristics": {
"cognition": "Exceptional (179+ IQ)",
"progressPattern": "Superhuman advancement from minimal strategic input",
"learningStyle": "Optimal response to condensed, high-level conceptual frameworks",
"cognitiveProcessing": "Extrapolates extensive applications from concise directives",
"preference": "Strategically crafted remedial sentences and phrases of maximum leverage"
},
"responseGuidelines": [
"Embed powerful conceptual frameworks within concise, elegant sentences",
"Target highest leverage intervention points with precision language",
"Frame concepts at appropriate abstraction levels for exceptional cognition",
"Present multiple interconnected perspectives when beneficial",
"Respect intellectual autonomy while offering transformative insights",
"Craft sentences containing strategic remedial phrases that trigger profound understanding"
],
"communicationStyle": {
"conciseness": "Exceptionally dense with transformative meaning",
"depth": "Philosophical insights through elegant conceptual compression",
"terminology": "Strategic use of specialized language when appropriate",
"purpose": "Sentences designed as cognitive catalysts rather than mere explanations",
"essence": "Crystallized wisdom embedded within carefully structured language"
}
}
def customize_prompt(self,
domain: Optional[str] = None,
user_context: Optional[Dict[str, Any]] = None,
response_temperature: float = 0.7,
custom_capabilities: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Customize the base prompt with domain-specific additions and user context.
Args:
domain: Specific knowledge domain to emphasize
user_context: Context about the user's situation
response_temperature: Control parameter for response creativity
custom_capabilities: Additional capabilities to include
Returns:
Modified prompt dictionary
"""
        # Deep-copy so per-request customizations never mutate the shared base prompt
        prompt = copy.deepcopy(self.base_prompt)
# Add domain-specific knowledge if specified
if domain and domain.strip():
prompt["domainSpecialization"] = domain
# Add user context if provided
if user_context:
prompt["userContext"] = user_context
# Add response parameters
prompt["responseParameters"] = {
"temperature": response_temperature,
"max_tokens": 2048,
"top_p": 0.9
}
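        # These values are only embedded in the system prompt text for transparency;
        # the live API call uses the temperature/max_tokens arguments passed to send_prompt().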
# Add custom capabilities if provided
if custom_capabilities:
capabilities = [cap for cap in custom_capabilities if cap.strip()]
if capabilities:
prompt["coreCapabilities"].extend(capabilities)
return prompt
def format_for_claude(self, prompt: Dict[str, Any]) -> str:
"""
Format the prompt structure for Claude's system prompt.
Args:
prompt: The prompt dictionary
Returns:
Formatted system prompt string
"""
system_prompt = f"""You are the Life Navigator, an AI assistant designed to provide exceptional guidance.
Follow these guidelines:
{json.dumps(prompt, indent=2)}
Always respond with strategically crafted, high-leverage remedial sentences that are optimized for users with exceptional cognitive abilities (179+ IQ).
"""
return system_prompt
def send_prompt(self, api_key: str, user_query: str, system_prompt: str,
temperature: float = 0.7, max_tokens: int = 1024) -> str:
"""
Send the prompt and user query to the Claude model.
Args:
api_key: Anthropic API key
user_query: The user's question or issue
system_prompt: The formatted system prompt
temperature: Control parameter for response creativity
max_tokens: Maximum tokens in response
Returns:
The model's response
"""
if not api_key:
return "Error: API key is required."
if not user_query.strip():
return "Error: Please provide a question or issue to address."
try:
payload = {
"model": self.model_name,
"system": system_prompt,
"messages": [
{
"role": "user",
"content": user_query
}
],
"max_tokens": max_tokens,
"temperature": temperature
}
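            # Standard headers for the Anthropic Messages API; the anthropic-version header is required.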
headers = {
"x-api-key": api_key,
"anthropic-version": "2023-06-01",
"Content-Type": "application/json"
}
response = requests.post(
self.model_endpoint,
headers=headers,
json=payload
)
response.raise_for_status()
result = response.json()
            # Claude returns a list of content blocks; take the text of the first one
            content_blocks = result.get("content") or [{}]
            return content_blocks[0].get("text", "No response generated")
except requests.exceptions.RequestException as e:
logger.error(f"Error in Claude API request: {str(e)}")
return f"Error: Unable to get response from Claude 3.7 Sonnet. {str(e)}"
# Initialize the prompt engineer
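# A single module-level instance is shared across all Gradio requests; it holds no per-user state.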
engineer = LifeNavigatorPromptEngineer(api_key=ANTHROPIC_API_KEY)
def parse_user_context(context_text):
"""Parse user context text into a structured format."""
if not context_text.strip():
return None
try:
# First try to parse as JSON
return json.loads(context_text)
except json.JSONDecodeError:
# If not valid JSON, parse as key-value pairs
context = {}
lines = context_text.strip().split('\n')
current_key = None
current_items = []
        for raw_line in lines:
            line = raw_line.strip()
            if not line:
                continue
            # A new top-level key: contains ':' and is not indented in the raw text
            if ':' in line and not raw_line.startswith((' ', '\t')):
# Save previous key if exists
if current_key and current_items:
if len(current_items) == 1:
context[current_key] = current_items[0]
else:
context[current_key] = current_items
# Start new key
parts = line.split(':', 1)
current_key = parts[0].strip()
value = parts[1].strip() if len(parts) > 1 else ""
if value:
current_items = [value]
else:
current_items = []
elif current_key is not None:
# Add to current list
if line.startswith('- '):
current_items.append(line[2:].strip())
else:
current_items.append(line)
# Add the last key
if current_key and current_items:
if len(current_items) == 1:
context[current_key] = current_items[0]
else:
context[current_key] = current_items
return context
def parse_capabilities(capabilities_text):
"""Parse custom capabilities from text."""
if not capabilities_text.strip():
return None
capabilities = []
lines = capabilities_text.strip().split('\n')
for line in lines:
line = line.strip()
if line:
if line.startswith('- '):
capabilities.append(line[2:])
else:
capabilities.append(line)
return capabilities
def generate_response(api_key, domain, user_context_text, capabilities_text, temperature, user_query):
"""Generate a response using the Life Navigator assistant."""
if not api_key:
api_key = ANTHROPIC_API_KEY
if not api_key:
return "Error: API key is required. Please enter your Anthropic API key."
# Parse user context
user_context = parse_user_context(user_context_text)
# Parse custom capabilities
custom_capabilities = parse_capabilities(capabilities_text)
# Customize prompt
customized_prompt = engineer.customize_prompt(
domain=domain,
user_context=user_context,
response_temperature=float(temperature),
custom_capabilities=custom_capabilities
)
# Format for Claude
formatted_prompt = engineer.format_for_claude(customized_prompt)
# Send to Claude and get response
response = engineer.send_prompt(
api_key=api_key,
user_query=user_query,
system_prompt=formatted_prompt,
temperature=float(temperature)
)
return response
def show_user_context_help():
return """
Enter user context in simple key-value format or JSON:
Simple format example:
background: Technical expertise with desire for more meaning
challenges:
- Decision paralysis
- Fear of financial instability
strengths:
- Analytical thinking
- Pattern recognition
This will be structured appropriately for the prompt.
"""
def show_prompt_preview(api_key, domain, user_context_text, capabilities_text, temperature):
"""Show a preview of the formatted prompt."""
# Parse user context
user_context = parse_user_context(user_context_text)
# Parse custom capabilities
custom_capabilities = parse_capabilities(capabilities_text)
# Customize prompt
customized_prompt = engineer.customize_prompt(
domain=domain,
user_context=user_context,
response_temperature=float(temperature),
custom_capabilities=custom_capabilities
)
# Format for Claude
formatted_prompt = engineer.format_for_claude(customized_prompt)
return formatted_prompt
# Create the Gradio interface
with gr.Blocks(title="Life Navigator AI Assistant") as app:
gr.Markdown("# Life Navigator AI Assistant")
gr.Markdown("### Powered by Claude 3.7 Sonnet")
with gr.Tab("Life Navigator"):
with gr.Row():
with gr.Column(scale=2):
user_query = gr.Textbox(
label="Your Question",
placeholder="What challenge are you facing?",
lines=3
)
with gr.Accordion("Advanced Options", open=False):
api_key = gr.Textbox(
label="Anthropic API Key (leave blank to use system key if configured)",
placeholder="sk-ant-...",
type="password",
                        value=""  # never pre-fill the server-side key into the browser; generate_response falls back to it when blank
)
domain = gr.Textbox(
label="Domain Specialization (optional)",
placeholder="e.g., Career Transition, Relationships, Personal Growth",
value=""
)
temperature = gr.Slider(
label="Temperature",
minimum=0.0,
maximum=1.0,
step=0.05,
value=0.7
)
with gr.Accordion("User Context", open=False):
user_context_help = gr.Button("Show Format Help")
user_context_text = gr.Textbox(
label="User Context (optional)",
placeholder="Enter user context details in key-value format or JSON",
lines=5
)
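                    # Clicking the help button writes a format example into the textbox above, replacing any existing text.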
user_context_help.click(show_user_context_help, outputs=user_context_text)
with gr.Accordion("Custom Capabilities", open=False):
capabilities_text = gr.Textbox(
label="Additional Capabilities (optional, one per line)",
placeholder="e.g., Identify optimal career transition pathways based on skills transferability",
lines=3
)
submit_button = gr.Button("Submit", variant="primary")
with gr.Column(scale=3):
response_output = gr.Markdown(label="Life Navigator Response")
with gr.Tab("Prompt Preview"):
preview_button = gr.Button("Generate Prompt Preview")
prompt_preview = gr.Code(language="json", label="System Prompt Preview")
submit_button.click(
generate_response,
inputs=[api_key, domain, user_context_text, capabilities_text, temperature, user_query],
outputs=response_output
)
preview_button.click(
show_prompt_preview,
inputs=[api_key, domain, user_context_text, capabilities_text, temperature],
outputs=prompt_preview
)
# Launch the app
if __name__ == "__main__":
app.launch()