# import gradio as gr
# import torch
# from PIL import Image
# from model import CRM
# from inference import generate3d
# import numpy as np

# # Load model
# crm_path = "CRM.pth"  # Make sure the model is uploaded to the Space
# model = CRM(torch.load(crm_path, map_location="cpu"))
# model = model.to("cuda:0" if torch.cuda.is_available() else "cpu")

# def generate_3d(image_path, seed=1234, scale=5.5, step=30):
#     image = Image.open(image_path).convert("RGB")
#     np_img = np.array(image)
#     glb_path = generate3d(model, np_img, np_img, "cuda:0" if torch.cuda.is_available() else "cpu")
#     return glb_path

# iface = gr.Interface(
#     fn=generate_3d,
#     inputs=gr.Image(type="filepath"),
#     outputs=gr.Model3D(),
#     title="Convolutional Reconstruction Model (CRM)",
#     description="Upload an image to generate a 3D model."
# )

# iface.launch()

############# 2nd #############

# import os
# import torch
# import gradio as gr
# from huggingface_hub import hf_hub_download
# from model import CRM  # Make sure this matches your model file structure

# # Define model details
# REPO_ID = "Mariam-Elz/CRM"  # Hugging Face model repo
# MODEL_FILES = {
#     "ccm-diffusion": "ccm-diffusion.pth",
#     "pixel-diffusion": "pixel-diffusion.pth",
#     "CRM": "CRM.pth"
# }
# DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# # Download models from Hugging Face if not already present
# MODEL_DIR = "./models"
# os.makedirs(MODEL_DIR, exist_ok=True)

# for name, filename in MODEL_FILES.items():
#     model_path = os.path.join(MODEL_DIR, filename)
#     if not os.path.exists(model_path):
#         print(f"Downloading {filename}...")
#         hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=MODEL_DIR)

# # Load the model
# print("Loading CRM Model...")
# model = CRM()
# model.load_state_dict(torch.load(os.path.join(MODEL_DIR, MODEL_FILES["CRM"]), map_location=DEVICE))
# model.to(DEVICE)
# model.eval()
# print("✅ Model Loaded Successfully!")

# # Define Gradio Interface
# def predict(input_image):
#     with torch.no_grad():
#         output = model(input_image.to(DEVICE))  # Modify based on model input format
#     return output.cpu()

# demo = gr.Interface(
#     fn=predict,
#     inputs=gr.Image(type="pil"),
#     outputs=gr.Image(type="pil"),
#     title="Convolutional Reconstruction Model (CRM)",
#     description="Upload an image to generate a reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()
######################## 3rd-MAIN ########################

# import torch
# import gradio as gr
# import requests
# import os

# # Download model weights from Hugging Face model repo (if not already present)
# model_repo = "Mariam-Elz/CRM"  # Your Hugging Face model repo
# model_files = {
#     "ccm-diffusion.pth": "ccm-diffusion.pth",
#     "pixel-diffusion.pth": "pixel-diffusion.pth",
#     "CRM.pth": "CRM.pth",
# }

# os.makedirs("models", exist_ok=True)

# for filename, output_path in model_files.items():
#     file_path = f"models/{output_path}"
#     if not os.path.exists(file_path):
#         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
#         print(f"Downloading {filename}...")
#         response = requests.get(url)
#         with open(file_path, "wb") as f:
#             f.write(response.content)

# # Load model (This part depends on how the model is defined)
# device = "cuda" if torch.cuda.is_available() else "cpu"

# def load_model():
#     model_path = "models/CRM.pth"
#     model = torch.load(model_path, map_location=device)
#     model.eval()
#     return model

# model = load_model()

# # Define inference function
# def infer(image):
#     """Process input image and return a reconstructed image."""
#     with torch.no_grad():
#         # Assuming model expects a tensor input
#         image_tensor = torch.tensor(image).to(device)
#         output = model(image_tensor)
#     return output.cpu().numpy()

# # Create Gradio UI
# demo = gr.Interface(
#     fn=infer,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="Convolutional Reconstruction Model",
#     description="Upload an image to get the reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()

################# 4th ##################

# import torch
# import gradio as gr
# import requests
# import os

# # Define model repo
# model_repo = "Mariam-Elz/CRM"

# # Define model files and download paths
# model_files = {
#     "CRM.pth": "models/CRM.pth"
# }

# os.makedirs("models", exist_ok=True)

# # Download model files only if they don't exist
# for filename, output_path in model_files.items():
#     if not os.path.exists(output_path):
#         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
#         print(f"Downloading {filename}...")
#         response = requests.get(url)
#         with open(output_path, "wb") as f:
#             f.write(response.content)

# # Load model with low memory usage
# def load_model():
#     model_path = "models/CRM.pth"
#     model = torch.load(model_path, map_location="cpu")  # Load on CPU to reduce memory usage
#     model.eval()
#     return model

# model = load_model()

# # Define inference function
# def infer(image):
#     """Process input image and return a reconstructed image."""
#     with torch.no_grad():
#         image_tensor = torch.tensor(image).unsqueeze(0)  # Add batch dimension
#         image_tensor = image_tensor.to("cpu")  # Keep on CPU to save memory
#         output = model(image_tensor)
#     return output.squeeze(0).numpy()

# # Create Gradio UI
# demo = gr.Interface(
#     fn=infer,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="Convolutional Reconstruction Model",
#     description="Upload an image to get the reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()
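# NOTE on the 3rd and 4th attempts above: torch.load() returns whatever object
# was pickled. If CRM.pth stores a plain state_dict (an OrderedDict) rather
# than a full nn.Module, the returned object has no .eval() method and
# load_model() raises AttributeError. The 5th and 7th versions below avoid
# this by instantiating an architecture first and calling load_state_dict().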
############## 5th #################

# import torch
# import torch.nn as nn
# import gradio as gr
# import requests
# import os

# # Define model repo
# model_repo = "Mariam-Elz/CRM"

# # Define model files and download paths
# model_files = {
#     "CRM.pth": "models/CRM.pth"
# }

# os.makedirs("models", exist_ok=True)

# # Download model files only if they don't exist
# for filename, output_path in model_files.items():
#     if not os.path.exists(output_path):
#         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
#         print(f"Downloading {filename}...")
#         response = requests.get(url)
#         with open(output_path, "wb") as f:
#             f.write(response.content)

# # Define the model architecture (you MUST replace this with your actual model)
# class CRM_Model(nn.Module):
#     def __init__(self):
#         super(CRM_Model, self).__init__()
#         self.layer1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
#         self.relu = nn.ReLU()
#         self.layer2 = nn.Conv2d(64, 3, kernel_size=3, padding=1)

#     def forward(self, x):
#         x = self.layer1(x)
#         x = self.relu(x)
#         x = self.layer2(x)
#         return x

# # Load model with proper architecture
# def load_model():
#     model = CRM_Model()  # Instantiate the model architecture
#     model_path = "models/CRM.pth"
#     model.load_state_dict(torch.load(model_path, map_location="cpu"))  # Load weights
#     model.eval()  # Set to evaluation mode
#     return model

# model = load_model()

# # Define inference function
# def infer(image):
#     """Process input image and return a reconstructed image."""
#     with torch.no_grad():
#         image_tensor = torch.tensor(image).unsqueeze(0).permute(0, 3, 1, 2).float() / 255.0  # Convert to tensor
#         output = model(image_tensor)  # Run through model
#         output = output.squeeze(0).permute(1, 2, 0).numpy() * 255.0  # Convert back to image
#     return output.astype("uint8")

# # Create Gradio UI
# demo = gr.Interface(
#     fn=infer,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="Convolutional Reconstruction Model",
#     description="Upload an image to get the reconstructed output."
# )

# if __name__ == "__main__":
#     demo.launch()
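# NOTE on the 5th attempt above: load_state_dict() raises RuntimeError when
# the checkpoint keys do not match the module's parameters, so the placeholder
# architecture only loads if CRM.pth actually contains weights for those two
# conv layers.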
############# 6th-worked-proc ##################

# import torch
# import gradio as gr
# import requests
# import os
# import numpy as np

# # Hugging Face Model Repository
# model_repo = "Mariam-Elz/CRM"

# # Download Model Weights (Only CRM.pth to Save Memory)
# model_path = "models/CRM.pth"
# os.makedirs("models", exist_ok=True)

# if not os.path.exists(model_path):
#     url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
#     print("Downloading CRM.pth...")
#     response = requests.get(url)
#     with open(model_path, "wb") as f:
#         f.write(response.content)

# # Set Device (Use CPU to Reduce RAM Usage)
# device = "cpu"

# # Load Model Efficiently
# def load_model():
#     model = torch.load(model_path, map_location=device)
#     if isinstance(model, torch.nn.Module):
#         model.eval()  # Ensure model is in inference mode
#     return model

# # Load model only when needed (saves memory)
# model = load_model()

# # Define Inference Function with Memory Optimizations
# def infer(image):
#     """Process input image and return a reconstructed image."""
#     with torch.no_grad():
#         # Convert image to torch tensor & normalize (float16 to save RAM)
#         image_tensor = torch.tensor(image, dtype=torch.float16).unsqueeze(0).permute(0, 3, 1, 2) / 255.0
#         image_tensor = image_tensor.to(device)

#         # Model Inference
#         output = model(image_tensor)

#         # Convert back to numpy image format
#         output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255.0
#         output_image = np.clip(output_image, 0, 255).astype(np.uint8)

#         # Free Memory
#         del image_tensor, output
#         torch.cuda.empty_cache()

#     return output_image

# # Create Gradio UI
# demo = gr.Interface(
#     fn=infer,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="Optimized Convolutional Reconstruction Model",
#     description="Upload an image to get the reconstructed output with reduced memory usage."
# )

# if __name__ == "__main__":
#     demo.launch()

############# 7th ################

import torch
import torch.nn as nn
import gradio as gr
import requests
import os
import torchvision.transforms as transforms
import numpy as np
from PIL import Image

# Hugging Face Model Repository
model_repo = "Mariam-Elz/CRM"

# Model File Path
model_path = "models/CRM.pth"
os.makedirs("models", exist_ok=True)

# Download model weights if not present
if not os.path.exists(model_path):
    url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
    print("Downloading CRM.pth...")
    response = requests.get(url)
    response.raise_for_status()  # Fail loudly instead of writing an error page to disk
    with open(model_path, "wb") as f:
        f.write(response.content)
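# Optional chunked download (sketch, not wired in): requests.get() above
# buffers the entire checkpoint in RAM before writing it out. For large .pth
# files, a streamed download is gentler on memory (huggingface_hub's
# hf_hub_download(), used in the 2nd attempt above, achieves the same). Swap
# this helper in for the block above if memory is tight; the name and chunk
# size are this sketch's own choices.
def download_checkpoint_streaming(url: str, dest_path: str, chunk_size: int = 1 << 20) -> None:
    """Stream `url` to `dest_path` in `chunk_size`-byte chunks."""
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(dest_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)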
# Set Device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Define Model Architecture (Replace with your actual model)
class CRMModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        # Map back to 3 channels so the output is a displayable RGB image
        # (a 64-channel output cannot be rendered by gr.Image)
        self.conv2 = nn.Conv2d(64, 3, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.conv2(x)  # No ReLU on the output: values stay in [-1, 1]-ish range
        return x

# Load Model
def load_model():
    print("Loading model...")
    model = CRMModel()  # Use the correct architecture here
    state_dict = torch.load(model_path, map_location=device)
    if isinstance(state_dict, dict):  # Ensure it's a valid state_dict
        model.load_state_dict(state_dict)
    else:
        raise ValueError("Error: the loaded checkpoint is not a state_dict.")
    model.to(device)
    model.eval()
    print("Model loaded successfully!")
    return model

# Load the model
model = load_model()

# Define Inference Function
def infer(image):
    """Process input image and return a reconstructed output."""
    try:
        print("Preprocessing image...")
        # Convert image to a normalized PyTorch tensor
        transform = transforms.Compose([
            transforms.Resize((256, 256)),                # Resize to fit model input
            transforms.ToTensor(),                        # (C, H, W) in [0, 1]
            transforms.Normalize(mean=[0.5], std=[0.5]),  # Scale to [-1, 1]
        ])
        image_tensor = transform(image).unsqueeze(0).to(device)  # Add batch dimension

        print("Running inference...")
        with torch.no_grad():
            output = model(image_tensor)  # Forward pass

        # Ensure output is a valid tensor
        if isinstance(output, torch.Tensor):
            output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy()
            # Invert the Normalize(0.5, 0.5) above: [-1, 1] -> [0, 255]
            output_image = np.clip((output_image * 0.5 + 0.5) * 255.0, 0, 255).astype(np.uint8)
            print("Inference complete! Returning output.")
            return output_image
        else:
            print("Error: Model output is not a tensor.")
            return None
    except Exception as e:
        print(f"Error during inference: {e}")
        return None

# Create Gradio UI
demo = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy"),
    title="Convolutional Reconstruction Model",
    description="Upload an image to get the reconstructed output."
)

if __name__ == "__main__":
    demo.launch()
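# Quick local check without the UI (sketch; "example.png" is a stand-in for
# any local RGB test image, not a file shipped with this Space):
#
#   from PIL import Image
#   out = infer(Image.open("example.png").convert("RGB"))
#   print(None if out is None else out.shape)  # expected: (256, 256, 3)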