vidit98 committed
Commit 805c611 · 1 parent: 2171e8f

add gif tutorial

Files changed (2)
  1. app.py +13 -31
  2. assets/GIF.gif +0 -0
app.py CHANGED
@@ -129,7 +129,6 @@ class ImageComp:
         maski[mask_ > 127] = 1
         mask = maski * panoptic_mask_
         unique_ids, counts = torch.unique(mask, return_counts=True)
-        # print(unique_ids, counts)
         mask_id = unique_ids[torch.argmax(counts[1:]) + 1]
         final_mask = torch.zeros(mask.shape).cuda()
         final_mask[panoptic_mask_ == mask_id] = 1
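Aside from dropping the debug print, this hunk is the segment-selection step: the user scribble is binarized, intersected with the panoptic id map, and the id covering the largest part of the scribble (skipping index 0, i.e. everything outside the scribble) becomes the object to edit. A minimal, self-contained sketch of that logic with toy tensors (variable names mirror the hunk; the example data is invented for illustration, and `.cuda()` is omitted so it runs on CPU):

```python
import torch

# Toy panoptic id map (values are segment ids) and a user scribble stored as a
# 0/255 grayscale mask, as in the hunk above. Both tensors are made up.
panoptic_mask_ = torch.tensor([[1, 1, 2, 2],
                               [1, 1, 2, 2],
                               [3, 3, 2, 2],
                               [3, 3, 3, 3]])
mask_ = torch.tensor([[0,   0,   0,   0],
                      [0, 255, 255,   0],
                      [0, 255, 255,   0],
                      [0,   0,   0,   0]])

maski = torch.zeros_like(panoptic_mask_)
maski[mask_ > 127] = 1                      # binarize the scribble
mask = maski * panoptic_mask_               # keep panoptic ids only under the scribble
unique_ids, counts = torch.unique(mask, return_counts=True)
# unique_ids[0] is 0 (outside the scribble), so take the argmax over counts[1:]
mask_id = unique_ids[torch.argmax(counts[1:]) + 1]
final_mask = torch.zeros(mask.shape)        # the original uses torch.zeros(...).cuda()
final_mask[panoptic_mask_ == mask_id] = 1
print(mask_id.item())                       # -> 2, the segment covering most of the scribble
```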
@@ -214,7 +213,7 @@ class ImageComp:
         seed_everything(seed)

         scale = [scale_s, scale_f, scale_t]
-        print(scale)
+        # print(scale)
         if save_memory:
             model.low_vram_shift(is_diffusing=False)
             # uc_cross = model.get_unconditional_conditioning(num_samples)
@@ -256,7 +255,6 @@ def create_app_demo():

     with gr.Row():
         gr.Markdown("## Object Level Appearance Editing")
-    print('first row')
     with gr.Row():
         gr.HTML(
             """
@@ -264,15 +262,13 @@
             <h3 style="font-weight: 450; font-size: 1rem; margin-top: 0.8rem; margin-bottom: 0.8rem">
             Instructions </h3>
             <ol>
-            <li>1. Upload an Input Image.</li>
-            <li>2. Mark one of segmented objects in the <i>Select Object to Edit</i> tab.</li>
-            <li>3. Upload an Reference Image.</li>
-            <li>4. Mark one of segmented objects in the <i>Select Reference Object</i> tab, whose appearance needs to used in the selected input object.</li>
-            <li>5. Enter a prompt and press <i>Run</i> button. (A very simple would also work) </li>
-            </ol>
+            <li>Upload an Input Image.</li>
+            <li>Mark one of the segmented objects in the <i>Select Object to Edit</i> tab.</li>
+            <li>Upload a Reference Image.</li>
+            <li>Mark one of the segmented objects in the <i>Select Reference Object</i> tab, whose appearance should be used for the selected input object.</li>
+            <li>Enter a prompt and press the <i>Run</i> button. (A very simple prompt also works.)</li>
             </ol>
             </div>""")
-    print('second row')
     with gr.Column():
         with gr.Row():
             img_edit = ImageComp('edit_app')
@@ -364,7 +360,7 @@ with block:
             <a href=" https://people.eecs.berkeley.edu/~trevor/" style="color:blue;">Trevor Darrell</a><sup>4</sup>,
             <a href="https://vita-group.github.io/" style="color:blue;">Zhangyang Wang</a><sup>1,3</sup>
             and <a href="https://www.humphreyshi.com/home" style="color:blue;">Humphrey Shi</a> <sup>1,5,6</sup> <br>
-            [<a href="https://github.com/Picsart-AI-Research/PAIR-Diffusion" style="color:red;">arXiv</a>]
+            [<a href="https://arxiv.org/abs/2303.17546" style="color:red;">arXiv</a>]
             [<a href="https://github.com/Picsart-AI-Research/PAIR-Diffusion" style="color:red;">GitHub</a>]
             </h2>
             <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
@@ -384,6 +380,11 @@

     with gr.Tab('Edit Appearance'):
         create_app_demo()
+        gr.HTML("""<br><p>&nbsp; Visual guide to use the demo</p><br>
+                <div id="myinst">
+                <img src="file/assets/GIF.gif" width="400" height="400">
+                </div>
+                """)
     with gr.Tab('Edit Structure'):
         create_struct_demo()
     with gr.Tab('Edit Both'):
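This is the change the commit message refers to: the new gr.HTML block renders the bundled assets/GIF.gif as a visual guide inside the "Edit Appearance" tab. A stripped-down sketch of the same pattern (create_app_demo is stubbed here; whether the file/ URL prefix actually serves the local GIF depends on the Gradio version and its static-file handling):

```python
import gradio as gr

def create_app_demo():
    # Stand-in for the repository's real tab builder; for illustration only.
    gr.Markdown("## Object Level Appearance Editing")

with gr.Blocks() as block:
    with gr.Tab('Edit Appearance'):
        create_app_demo()
        # Tutorial GIF appended below the demo widgets, as in the hunk above.
        gr.HTML("""<br><p>&nbsp; Visual guide to use the demo</p><br>
                <div id="myinst">
                <img src="file/assets/GIF.gif" width="400" height="400">
                </div>
                """)

if __name__ == '__main__':
    block.launch()
```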
@@ -391,23 +392,4 @@



-
-
-print('Launching')
-block.launch(debug=True)
-
-
-# import gradio as gr
-# from transformers import pipeline
-
-# # pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
-
-# def predict(image):
-#     return {"hot dog": 0.1 for p in range(2)}
-
-# gr.Interface(
-#     predict,
-#     inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
-#     outputs=gr.outputs.Label(num_top_classes=2),
-#     title="Hot Dog? Or Not?",
-# ).launch()
+block.launch(share=True)
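Besides removing the leftover debug print and the commented-out Interface example, the last hunk switches the launch call from debug mode to a shared link. Both keyword arguments are standard Blocks.launch options and can be combined if needed; a one-line sketch:

```python
# share=True requests a temporary public gradio.live URL; debug=True keeps the
# process in the foreground and surfaces tracebacks in the console.
block.launch(share=True, debug=True)
```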
 
assets/GIF.gif ADDED