himanshu1844 committed on
Commit
c221508
·
1 Parent(s): 55f6ef6
Files changed (3) hide show
  1. app.py +1 -1
  2. requirements..txt +9 -9
  3. voxify.py +3 -5
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from voxify import VoxifyInfereence
3
  import torchaudio
4
  voxify=VoxifyInfereence(name="declare-lab/TangoFlux")
5
  def gradio_generate(prompt, steps, guidance,duration=10):
 
1
  import gradio as gr
2
+ from Voxify import VoxifyInfereence
3
  import torchaudio
4
  voxify=VoxifyInfereence(name="declare-lab/TangoFlux")
5
  def gradio_generate(prompt, steps, guidance,duration=10):
requirements..txt CHANGED
@@ -1,11 +1,11 @@
1
- torch==2.4.0
2
- torchaudio===2.4.0
3
- torchlibrosa==0.1.0
4
- torchvision==0.19.0
5
- transformers==4.44.0
6
- diffusers==0.32.0
7
- accelerate==0.34.2
8
- datasets==2.21.0
9
  librosa
10
  tqdm
11
- wavio==0.0.7
 
1
+ torch
2
+ torchaudio
3
+ torchlibrosa
4
+ torchvision
5
+ transformers
6
+ diffusers
7
+ accelerate
8
+ datasets
9
  librosa
10
  tqdm
11
+ wavio
voxify.py CHANGED
@@ -1,7 +1,5 @@
1
- import torch
2
  from diffusers import AutoencoderOobleck
3
- from torch import nn
4
- import numpy as np
5
  from model import Voxify
6
  from huggingface_hub import snapshot_download
7
  from safetensors.torch import load_file
@@ -13,8 +11,8 @@ class VoxifyInfereence:
13
  weights=load_file("{}/tangoflux.safetensors".format(path))
14
  with open("{}/config.json".format(path), "r") as f:
15
  config = json.load(f)
16
- self.voxify = Voxify(config)
17
- self.voxify.load_state_dict(weights,strict=False)
18
  def generate(self, prompt,steps=25,duration=10,guidance_scale=4.5):
19
  with torch.no_grad():
20
  latent=self.model.inference_flow(prompt,
 
 
1
  from diffusers import AutoencoderOobleck
2
+ import torch
 
3
  from model import Voxify
4
  from huggingface_hub import snapshot_download
5
  from safetensors.torch import load_file
 
11
  weights=load_file("{}/tangoflux.safetensors".format(path))
12
  with open("{}/config.json".format(path), "r") as f:
13
  config = json.load(f)
14
+ self.model = Voxify(config)
15
+ self.model.load_state_dict(weights,strict=False)
16
  def generate(self, prompt,steps=25,duration=10,guidance_scale=4.5):
17
  with torch.no_grad():
18
  latent=self.model.inference_flow(prompt,