Commit a3595a3 by multimodalart (HF staff)
1 parent: c29cb26

Update app.py

Files changed (1):
  1. app.py (+11, -7)
app.py CHANGED
@@ -26,6 +26,10 @@ device = "cuda"
 
 hf_hub_download(repo_id="stabilityai/stable-video-diffusion-img2vid-xt", filename="svd_xt.safetensors", local_dir="checkpoints", token=os.getenv("HF_TOKEN"))
 
+css = '''
+.gradio-container{max-width:900px}
+'''
+
 def load_model(
     config: str,
     device: str,
@@ -164,7 +168,7 @@ def sample(
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
     writer = cv2.VideoWriter(
         video_path,
-        cv2.VideoWriter_fourcc(*'avc1'),
+        cv2.VideoWriter_fourcc(*'mp4v'),
         fps_id + 1,
         (samples.shape[-1], samples.shape[-2]),
     )
@@ -263,18 +267,18 @@ def resize_image(image, output_size=(1024, 576)):
 
     return cropped_image
 
-with gr.Blocks() as demo:
+with gr.Blocks(css=css) as demo:
     gr.Markdown('''# Stable Video Diffusion - Image2Video - XT
-    Generate 25 frames of video from a single image using SDV-XT.
+    Generate 25 frames of video from a single image with SDV-XT. [Join the waitlist](https://stability.ai/contact) for the text-to-video web experience
     ''')
     with gr.Column():
         image = gr.Image(label="Upload your image (it will be center cropped to 1024x576)", type="pil")
         generate_btn = gr.Button("Generate")
-        with gr.Accordion("Advanced options", open=False):
-            cond_aug = gr.Slider(label="Conditioning augmentation", value=0.02, minimum=0.0)
-            seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=int(1e9), step=1)
+        #with gr.Accordion("Advanced options", open=False):
+        #    cond_aug = gr.Slider(label="Conditioning augmentation", value=0.02, minimum=0.0)
+        #    seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=int(1e9), step=1)
             #decoding_t = gr.Slider(label="Decode frames at a time", value=6, minimum=1, maximum=14, interactive=False)
-            saving_fps = gr.Slider(label="Saving FPS", value=6, minimum=6, maximum=48, step=6)
+        #    saving_fps = gr.Slider(label="Saving FPS", value=6, minimum=6, maximum=48, step=6)
     with gr.Column():
         video = gr.Video()
     image.upload(fn=resize_image, inputs=image, outputs=image)
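
The one-line codec change in sample() is the functional fix here. A likely reason for the swap: 'avc1' asks OpenCV for an H.264 encoder, which the stock pip opencv-python wheels generally do not ship, so cv2.VideoWriter can silently produce an empty or unplayable file; 'mp4v' (MPEG-4 Part 2) uses the encoder bundled with OpenCV itself. Below is a minimal sketch of the writer setup after this change, assuming frames arrive as uint8 BGR numpy arrays; write_mp4, the placeholder frames, and the fps value are illustrative and not names taken from app.py.

# Sketch only: mirrors the VideoWriter call after this commit, outside the real sample() function.
import cv2
import numpy as np

def write_mp4(frames, video_path, fps):
    height, width = frames[0].shape[:2]
    writer = cv2.VideoWriter(
        video_path,
        cv2.VideoWriter_fourcc(*'mp4v'),   # was 'avc1' before this commit
        fps,                               # app.py passes fps_id + 1 here
        (width, height),                   # OpenCV expects (width, height), hence shape[-1]/shape[-2] above
    )
    for frame in frames:
        writer.write(frame)                # frames must be uint8 BGR arrays of the declared size
    writer.release()

# Example: 25 black 1024x576 frames at 7 fps.
write_mp4([np.zeros((576, 1024, 3), dtype=np.uint8)] * 25, "output.mp4", 7)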
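The UI change works because gr.Blocks accepts a css= string that Gradio injects on top of its default styles, so the new .gradio-container rule caps the app width at 900px. A minimal standalone sketch of that wiring, with a trimmed-down layout for illustration rather than the Space's full interface:

# Sketch only: shows the css= hookup added in this commit with a reduced layout.
import gradio as gr

css = '''
.gradio-container{max-width:900px}
'''

with gr.Blocks(css=css) as demo:
    gr.Markdown("# Stable Video Diffusion - Image2Video - XT")
    with gr.Column():
        image = gr.Image(label="Upload your image (it will be center cropped to 1024x576)", type="pil")
        generate_btn = gr.Button("Generate")
    with gr.Column():
        video = gr.Video()

if __name__ == "__main__":
    demo.launch()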