# import gradio as gr
# import torch
# from PIL import Image
# from model import CRM
# from inference import generate3d
# import numpy as np

# # Load model
# crm_path = "CRM.pth"  # Make sure the model is uploaded to the Space
# model = CRM(torch.load(crm_path, map_location="cpu"))
# model = model.to("cuda:0" if torch.cuda.is_available() else "cpu")

# def generate_3d(image_path, seed=1234, scale=5.5, step=30):
#     image = Image.open(image_path).convert("RGB")
#     np_img = np.array(image)
#     glb_path = generate3d(model, np_img, np_img, "cuda:0" if torch.cuda.is_available() else "cpu")
#     return glb_path

# iface = gr.Interface(
#     fn=generate_3d,
#     inputs=gr.Image(type="filepath"),
#     outputs=gr.Model3D(),
#     title="Convolutional Reconstruction Model (CRM)",
#     description="Upload an image to generate a 3D model."
# )

# iface.launch()

############# 2nd #############

# import os
# import torch
# import gradio as gr
# from huggingface_hub import hf_hub_download
# from model import CRM  # Make sure this matches your model file structure

# # Define model details
# REPO_ID = "Mariam-Elz/CRM"  # Hugging Face model repo
# MODEL_FILES = {
#     "ccm-diffusion": "ccm-diffusion.pth",
#     "pixel-diffusion": "pixel-diffusion.pth",
#     "CRM": "CRM.pth"
# }
# DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# # Download models from Hugging Face if not already present
# MODEL_DIR = "./models"
# os.makedirs(MODEL_DIR, exist_ok=True)

# for name, filename in MODEL_FILES.items():
#     model_path = os.path.join(MODEL_DIR, filename)
#     if not os.path.exists(model_path):
#         print(f"Downloading {filename}...")
#         hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=MODEL_DIR)

# # Load the model
# print("Loading CRM Model...")
# model = CRM()
# model.load_state_dict(torch.load(os.path.join(MODEL_DIR, MODEL_FILES["CRM"]), map_location=DEVICE))
# model.to(DEVICE)
# model.eval()
# print("✅ Model Loaded Successfully!")

# # Define Gradio Interface
# def predict(input_image):
#     with torch.no_grad():
#         output = model(input_image.to(DEVICE))  # Modify based on model input format
#     return output.cpu()

# demo = gr.Interface(
#     fn=predict,
#     inputs=gr.Image(type="pil"),
#     outputs=gr.Image(type="pil"),
#     title="Convolutional Reconstruction Model (CRM)",
#     description="Upload an image to generate a reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()

############# 3rd #############

# import torch
# import gradio as gr
# import requests
# import os

# # Download model weights from Hugging Face model repo (if not already present)
# model_repo = "Mariam-Elz/CRM"  # Your Hugging Face model repo
# model_files = {
#     "ccm-diffusion.pth": "ccm-diffusion.pth",
#     "pixel-diffusion.pth": "pixel-diffusion.pth",
#     "CRM.pth": "CRM.pth",
# }

# os.makedirs("models", exist_ok=True)

# for filename, output_path in model_files.items():
#     file_path = f"models/{output_path}"
#     if not os.path.exists(file_path):
#         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
#         print(f"Downloading {filename}...")
#         response = requests.get(url)
#         with open(file_path, "wb") as f:
#             f.write(response.content)

# # Load model (This part depends on how the model is defined)
# device = "cuda" if torch.cuda.is_available() else "cpu"

# def load_model():
#     model_path = "models/CRM.pth"
#     model = torch.load(model_path, map_location=device)
#     model.eval()
#     return model

# model = load_model()

# # Define inference function
# def infer(image):
#     """Process input image and return a reconstructed image."""
#     with torch.no_grad():
#         # Assuming model expects a tensor input
#         image_tensor = torch.tensor(image).to(device)
#         output = model(image_tensor)
#     return output.cpu().numpy()

# # Create Gradio UI
# demo = gr.Interface(
#     fn=infer,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="Convolutional Reconstruction Model",
#     description="Upload an image to get the reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()

############# 4th #############

import torch
import gradio as gr
import requests
import os

# Define model repo
model_repo = "Mariam-Elz/CRM"

# Define model files and download paths
model_files = {
    "CRM.pth": "models/CRM.pth"
}

os.makedirs("models", exist_ok=True)

# Download model files only if they don't exist
for filename, output_path in model_files.items():
    if not os.path.exists(output_path):
        url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
        print(f"Downloading {filename}...")
        response = requests.get(url)
        response.raise_for_status()  # Fail loudly on a bad download instead of writing an HTML error page
        with open(output_path, "wb") as f:
            f.write(response.content)

# Load model with low memory usage
def load_model():
    model_path = "models/CRM.pth"
    # Note: this assumes CRM.pth pickles a full nn.Module. If it is only a
    # state_dict, construct the CRM model first and call load_state_dict instead.
    model = torch.load(model_path, map_location="cpu")  # Load on CPU to reduce memory usage
    model.eval()
    return model

model = load_model()

# Define inference function
def infer(image):
    """Process input image and return a reconstructed image."""
    with torch.no_grad():
        # Gradio delivers an HxWxC uint8 array; convert it to the normalized
        # NCHW float tensor that torch vision models typically expect.
        image_tensor = torch.from_numpy(image).float().permute(2, 0, 1) / 255.0
        image_tensor = image_tensor.unsqueeze(0).to("cpu")  # Add batch dim; keep on CPU to save memory
        output = model(image_tensor)
    # Assuming the model returns a CHW float tensor in [0, 1], convert it
    # back to an HxWxC uint8 array for display.
    output = output.squeeze(0).permute(1, 2, 0).clamp(0, 1)
    return (output.numpy() * 255).astype("uint8")

# Create Gradio UI
demo = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="numpy"),
    title="Convolutional Reconstruction Model",
    description="Upload an image to get the reconstructed output."
)

if __name__ == "__main__":
    demo.launch()
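
# Optional pre-flight check (a minimal sketch, kept commented out like the earlier
# iterations above): load_model() assumes CRM.pth pickles a full nn.Module, so this
# snippet inspects what torch.load actually returns before deploying.
#
# import torch
# ckpt = torch.load("models/CRM.pth", map_location="cpu")
# if isinstance(ckpt, dict):
#     # A plain state_dict: load_model() would need CRM() + load_state_dict instead.
#     print("state_dict checkpoint; sample keys:", list(ckpt)[:5])
# else:
#     # A full pickled module: torch.load alone is enough, as load_model() assumes.
#     print("full model object:", type(ckpt).__name__)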
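
# Example client call (a sketch, assuming this file runs as app.py in a Hugging Face
# Space; the Space id "Mariam-Elz/CRM" is an assumption, and handle_file requires a
# recent gradio_client release). api_name="/predict" is the gr.Interface default.
#
# from gradio_client import Client, handle_file
# client = Client("Mariam-Elz/CRM")
# result = client.predict(handle_file("input.png"), api_name="/predict")
# print(result)  # local path to the reconstructed image returned by the Space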