QinOwen committed on
Commit
869596d
1 Parent(s): d4a2e0d

change-weights-path

Browse files
VADER-VideoCrafter/scripts/main/train_t2v_lora.py CHANGED
@@ -580,12 +580,14 @@ def run_training(args, model, **kwargs):
580
 
581
  # load the pretrained LoRA model
582
  if args.lora_ckpt_path != "Base Model":
583
- if args.lora_ckpt_path == "huggingface-hps-aesthetic": # download the pretrained LoRA model from huggingface
584
- snapshot_download(repo_id='zheyangqin/VADER', local_dir ='VADER-VideoCrafter/checkpoints/pretrained_lora')
585
- args.lora_ckpt_path = 'VADER-VideoCrafter/checkpoints/pretrained_lora/vader_videocrafter_hps_aesthetic.pt'
586
- elif args.lora_ckpt_path == "huggingface-pickscore": # download the pretrained LoRA model from huggingface
587
- snapshot_download(repo_id='zheyangqin/VADER', local_dir ='VADER-VideoCrafter/checkpoints/pretrained_lora')
588
- args.lora_ckpt_path = 'VADER-VideoCrafter/checkpoints/pretrained_lora/vader_videocrafter_pickscore.pt'
 
 
589
  # load the pretrained LoRA model
590
  peft.set_peft_model_state_dict(peft_model, torch.load(args.lora_ckpt_path))
591
 
 
580
 
581
  # load the pretrained LoRA model
582
  if args.lora_ckpt_path != "Base Model":
583
+ if args.lora_ckpt_path == "huggingface-pickscore": # download the pretrained LoRA model from huggingface
584
+ os.makedirs('VADER-VideoCrafter/checkpoints/pretrained_lora_pickScore', exist_ok=True)
585
+ snapshot_download(repo_id='zheyangqin/VADER_VideoCrafter_PickScore', local_dir ='VADER-VideoCrafter/checkpoints/pretrained_lora_pickScore')
586
+ args.lora_ckpt_path = 'VADER-VideoCrafter/checkpoints/pretrained_lora_pickScore/vader_videocrafter_pickscore.pt'
587
+ elif args.lora_ckpt_path == "huggingface-hps-aesthetic": # download the pretrained LoRA model from huggingface
588
+ os.makedirs('VADER-VideoCrafter/checkpoints/pretrained_lora_hps_aesthetic', exist_ok=True)
589
+ snapshot_download(repo_id='zheyangqin/VADER_VideoCrafter_HPS_Aesthetic', local_dir ='VADER-VideoCrafter/checkpoints/pretrained_lora_hps_aesthetic')
590
+ args.lora_ckpt_path = 'VADER-VideoCrafter/checkpoints/pretrained_lora_hps_aesthetic/vader_videocrafter_hps_aesthetic.pt'
591
  # load the pretrained LoRA model
592
  peft.set_peft_model_state_dict(peft_model, torch.load(args.lora_ckpt_path))
593
 
app.py CHANGED
@@ -51,7 +51,7 @@ def gradio_main_fn(prompt, lora_model, lora_rank, seed, height, width, unconditi
51
  return video_path
52
 
53
  def reset_fn():
54
- return ("A mermaid with flowing hair and a shimmering tail discovers a hidden underwater kingdom adorned with coral palaces, glowing pearls, and schools of colorful fish, encountering both wonders and dangers along the way.",
55
  200, 384, 512, 12.0, 25, 1.0, 24, 16, 10, "huggingface-pickscore")
56
 
57
  def update_lora_rank(lora_model):
@@ -198,7 +198,7 @@ with gr.Blocks(css=custom_css) as demo:
198
  )
199
  lora_rank = gr.Slider(minimum=8, maximum=16, label="LoRA Rank", step = 8, value=16)
200
  prompt = gr.Textbox(placeholder="Enter prompt text here", lines=4, label="Text Prompt",
201
- value="A mermaid with flowing hair and a shimmering tail discovers a hidden underwater kingdom adorned with coral palaces, glowing pearls, and schools of colorful fish, encountering both wonders and dangers along the way.")
202
  run_btn = gr.Button("Run Inference")
203
 
204
  with gr.Column():
 
51
  return video_path
52
 
53
  def reset_fn():
54
+ return ("A brown dog eagerly eats from a bowl in a kitchen.",
55
  200, 384, 512, 12.0, 25, 1.0, 24, 16, 10, "huggingface-pickscore")
56
 
57
  def update_lora_rank(lora_model):
 
198
  )
199
  lora_rank = gr.Slider(minimum=8, maximum=16, label="LoRA Rank", step = 8, value=16)
200
  prompt = gr.Textbox(placeholder="Enter prompt text here", lines=4, label="Text Prompt",
201
+ value="A brown dog eagerly eats from a bowl in a kitchen.")
202
  run_btn = gr.Button("Run Inference")
203
 
204
  with gr.Column():