import os

import gradio as gr
import spaces


def main():
    """Launch a Gradio diagnostic UI for the Vision 2030 Assistant.

    Builds three tabs — a file/PDF presence check, a dependency/GPU check,
    and a tokenizer-loading check — then starts the Gradio server with
    ``interface.launch()`` (blocks until the server stops).
    """
    with gr.Blocks(title="Vision 2030 Assistant - Debugging") as interface:
        gr.Markdown("# Vision 2030 Assistant - System Check")
        gr.Markdown("This interface tests your system configuration to ensure all components are working.")

        # --- File check tab --------------------------------------------------
        with gr.Tab("File Check"):
            gr.Markdown("### Check PDF Files and Directory Structure")
            check_btn = gr.Button("Check Files")
            files_output = gr.Textbox(label="Files Status", lines=10)

            def check_files():
                """Return a report of the working directory and expected PDFs."""
                results = []

                # List everything in the current working directory.
                results.append("Files in directory:")
                results.append("\n".join(os.listdir(".")))

                # Check that the two source PDFs are present and report sizes.
                # NOTE(fix): the English filename was "saudi_vision203.pdf"
                # (missing the trailing 0), so it was always reported missing;
                # corrected to match the "saudi_vision2030_ar.pdf" sibling.
                results.append("\nPDF Status:")
                for pdf_file in ["saudi_vision2030.pdf", "saudi_vision2030_ar.pdf"]:
                    if os.path.exists(pdf_file):
                        size = os.path.getsize(pdf_file) / (1024 * 1024)  # bytes -> MB
                        results.append(f"{pdf_file}: Found ({size:.2f} MB)")
                    else:
                        results.append(f"{pdf_file}: Not found")

                return "\n".join(results)

            check_btn.click(check_files, inputs=[], outputs=[files_output])

        # --- Dependency check tab --------------------------------------------
        with gr.Tab("Dependency Check"):
            gr.Markdown("### Check Required Dependencies")
            dep_btn = gr.Button("Check Dependencies")
            dep_output = gr.Textbox(label="Dependency Status", lines=20)

            @spaces.GPU
            def check_dependencies():
                """Report installed versions of required libraries and CUDA status."""
                results = []

                # Probe each required library; report version when exposed.
                for lib_name in [
                    "torch",
                    "transformers",
                    "sentencepiece",
                    "accelerate",
                    "langchain",
                    "langchain_community",
                    "PyPDF2",
                ]:
                    try:
                        module = __import__(lib_name)
                        if hasattr(module, "__version__"):
                            results.append(f"✓ {lib_name}: {module.__version__}")
                        else:
                            results.append(f"✓ {lib_name}: Installed (no version info)")
                    except ImportError:
                        results.append(f"✗ {lib_name}: Not installed")
                    except Exception as e:
                        # Import succeeded partially or raised something unexpected.
                        results.append(f"? {lib_name}: Error - {str(e)}")

                # Report CUDA availability via torch (best-effort: any failure
                # is captured in the report rather than crashing the callback).
                try:
                    import torch

                    results.append("\nGPU status:")
                    results.append(f"CUDA available: {torch.cuda.is_available()}")
                    if torch.cuda.is_available():
                        results.append(f"CUDA device count: {torch.cuda.device_count()}")
                        results.append(f"CUDA current device: {torch.cuda.current_device()}")
                        results.append(f"CUDA device name: {torch.cuda.get_device_name(0)}")
                except Exception as e:
                    results.append(f"GPU status check error: {str(e)}")

                return "\n".join(results)

            dep_btn.click(check_dependencies, inputs=[], outputs=[dep_output])

        # --- Model check tab ---------------------------------------------------
        with gr.Tab("Model Check"):
            gr.Markdown("### Test Model Loading")
            model_btn = gr.Button("Test Tokenizer Only")
            model_output = gr.Textbox(label="Model Status", lines=15)

            @spaces.GPU
            def test_tokenizer():
                """Try loading and exercising the ALLaM tokenizer, reporting each step."""
                results = []
                try:
                    results.append("Testing AutoTokenizer...")
                    from transformers import AutoTokenizer

                    # Report optional supporting packages before the load attempt.
                    try:
                        import accelerate
                        results.append(f"Accelerate version: {accelerate.__version__}")
                    except ImportError:
                        results.append("Accelerate is not installed")

                    try:
                        import sentencepiece  # noqa: F401 — presence check only
                        results.append("SentencePiece is installed")
                    except ImportError:
                        results.append("SentencePiece is not installed")

                    # Load only the tokenizer (no model weights) as a cheap probe.
                    model_name = "ALLaM-AI/ALLaM-7B-Instruct-preview"
                    tokenizer = AutoTokenizer.from_pretrained(
                        model_name,
                        trust_remote_code=True,
                        use_fast=False,
                    )
                    results.append(f"✓ Successfully loaded tokenizer for {model_name}")

                    # Smoke-test tokenization on a fixed sentence.
                    tokens = tokenizer("Hello, this is a test", return_tensors="pt")
                    results.append("✓ Tokenizer works properly")
                    results.append(f"Input IDs: {tokens.input_ids.shape}")
                except Exception as e:
                    results.append(f"✗ Error: {str(e)}")
                    import traceback
                    results.append(traceback.format_exc())

                return "\n".join(results)

            model_btn.click(test_tokenizer, inputs=[], outputs=[model_output])

    interface.launch()


if __name__ == "__main__":
    main()