Hugging Face Spaces — commit "Update app.py" (Space status: Runtime error)
Browse files
File changed: app.py
@@ -44,8 +44,11 @@ print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
(old version)
 44
 45   description = """
 46   # Lumina Next Text-to-Image
 47 - (removed blank line)
 48 - (removed blank line)
 49   """
 50
 51   hf_token = os.environ["HF_TOKEN"]
@@ -184,10 +187,6 @@ def infer_ode(args, infer_args, text_encoder, tokenizer, vae, model):
(old version)
 184   os.environ["RANK"] = str(0)
 185   os.environ["WORLD_SIZE"] = str(args.num_gpus)
 186
 187 - # dist.init_process_group("nccl")
 188 - # set up fairscale environment because some methods of the Lumina model need it,
 189 - # though for single-GPU inference fairscale actually has no effect
 190 - # fs_init.initialize_model_parallel(args.num_gpus)
 191   torch.cuda.set_device(0)
 192
 193   # loading model to gpu
@@ -586,6 +585,7 @@ def main():
(old version)
 586       ],
 587       [cap],
 588       label="Examples",
 589   )
 590
 591   @spaces.GPU(duration=240)
(new version, hunk @@ -44,8 +44,11 @@)
 44
 45   description = """
 46   # Lumina Next Text-to-Image
 47 + (added blank line)
 48 + #### Lumina-Next-T2I is a 2B `Next-DiT` model with `Gemma-2B` text encoder.
 49 + (added blank line)
 50 + #### Demo current model: `Lumina-Next-T2I`
 51 + (added blank line)
 52   """
 53
 54   hf_token = os.environ["HF_TOKEN"]
(new version, hunk @@ -184,10 +187,6 @@)
 187   os.environ["RANK"] = str(0)
 188   os.environ["WORLD_SIZE"] = str(args.num_gpus)
 189
 190   torch.cuda.set_device(0)
 191
 192   # loading model to gpu
(new version, hunk @@ -586,6 +585,7 @@)
 585       ],
 586       [cap],
 587       label="Examples",
 588 +     examples_per_page=22,
 589   )
 590
 591   @spaces.GPU(duration=240)