"""Gradio demo: text-to-video generation with VADER LoRA-tuned VideoCrafter."""

import sys
from copy import deepcopy

import gradio as gr
import spaces  # Hugging Face Spaces ZeroGPU helper; imported early per Spaces convention
from transformers import pipeline

# Make the VADER-VideoCrafter scripts importable before importing from them.
sys.path.append('./VADER-VideoCrafter/scripts/main')
sys.path.append('./VADER-VideoCrafter/scripts')
sys.path.append('./VADER-VideoCrafter')

from train_t2v_lora import main_fn, setup_model

# Korean-to-English translator; prompts detected as Korean are translated
# before being passed to the (English-only) text-to-video model.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Each example row follows the gr.Examples input order:
# [prompt, lora_model, lora_rank, seed, height, width,
#  unconditional_guidance_scale, ddim_steps, ddim_eta, frames, savefps]
examples = [
    ["Fairy and Magical Flowers: A fairy tends to enchanted, glowing flowers.", 'huggingface-hps-aesthetic', 
     8, 901, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A cat playing an electric guitar in a loft with industrial-style decor and soft, multicolored lights.", 
     'huggingface-hps-aesthetic', 8, 208, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A raccoon playing a guitar under a blossoming cherry tree.", 
     'huggingface-hps-aesthetic', 8, 180, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A raccoon playing an electric bass in a garage band setting.", 
     'huggingface-hps-aesthetic', 8, 400, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A talking bird with shimmering feathers and a melodious voice finds a legendary treasure, guiding through enchanted forests, ancient ruins, and mystical challenges.",
     "huggingface-pickscore", 16, 200, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A snow princess stands on the balcony of her ice castle, her hair adorned with delicate snowflakes, overlooking her serene realm.",
     "huggingface-pickscore", 16, 400, 384, 512, 12.0, 25, 1.0, 24, 10],
    ["A mermaid with flowing hair and a shimmering tail discovers a hidden underwater kingdom adorned with coral palaces, glowing pearls, and schools of colorful fish, encountering both wonders and dangers along the way.",
     "huggingface-pickscore", 16, 800, 384, 512, 12.0, 25, 1.0, 24, 10],
]

# Load the base model once at startup; gradio_main_fn passes a deep copy
# of it to each generation request.
model = setup_model()

def gradio_main_fn(prompt, lora_model, lora_rank, seed, height, width,
                   unconditional_guidance_scale, ddim_steps, ddim_eta,
                   frames, savefps):
    global model
    if model is None:
        return "Model is not loaded. Please load the model first."

    # Detect Korean input (Hangul Jamo or syllable code-point ranges) and
    # translate it to English.
    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
        translated = translator(prompt, max_length=512)[0]['translation_text']
        print(f"Translated prompt: {translated}")
        prompt = translated

    # Generate the video using a deep copy of the preloaded base model.
    video_path = main_fn(prompt=prompt,
                         lora_model=lora_model,
                         lora_rank=int(lora_rank),
                         seed=int(seed),
                         height=int(height),
                         width=int(width),
                         unconditional_guidance_scale=float(unconditional_guidance_scale),
                         ddim_steps=int(ddim_steps),
                         ddim_eta=float(ddim_eta),
                         frames=int(frames),
                         savefps=int(savefps),
                         model=deepcopy(model))

    return video_path
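
# Illustrative direct call (hypothetical values), bypassing the UI:
#   gradio_main_fn("A corgi surfing a wave at sunset", "huggingface-pickscore",
#                  16, 42, 384, 512, 12.0, 25, 1.0, 24, 10)
# returns the path of the saved video, which gr.Video then displays.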

# Defaults restored by the Reset button; the tuple order must match the
# outputs list wired to reset_btn.click below.
def reset_fn():
    return ("A brown dog eagerly eats from a bowl in a kitchen.",
            200, 384, 512, 12.0, 25, 1.0, 24, 16, 10, "huggingface-pickscore")

# Selecting a checkpoint moves the LoRA-rank slider to the matching rank
# (the PickScore checkpoint uses rank 16, the HPS-aesthetic checkpoint rank 8).
def update_lora_rank(lora_model):
    if lora_model == "huggingface-pickscore":
        return gr.update(value=16)
    elif lora_model == "huggingface-hps-aesthetic":
        return gr.update(value=8)
    else:  # "Base Model"
        return gr.update(value=8)

# Inverse of update_lora_rank: moving the rank slider selects the matching model.
def update_dropdown(lora_rank):
    if lora_rank == 16:
        return gr.update(value="huggingface-pickscore")
    elif lora_rank == 8:
        return gr.update(value="huggingface-hps-aesthetic")
    else: # 0
        return gr.update(value="Base Model")
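
# These two callbacks mirror each other, so the dropdown and the slider stay
# consistent whichever one the user edits.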

# Custom CSS: centers the main rows and lets the parameter column stretch.
custom_css = """
    #centered {
        display: flex;
        justify-content: center;
        width: 60%;
        margin: 0 auto;
    }
    .column-centered {
        display: flex;
        flex-direction: column;
        align-items: center;
        width: 60%;
    }
    #image-upload {
        flex-grow: 1;
    }
    #params .tabs {
        display: flex;
        flex-direction: column;
        flex-grow: 1;
    }
    #params .tabitem[style="display: block;"] {
        flex-grow: 1;
        display: flex !important;
    }
    #params .gap {
        flex-grow: 1;
    }
    #params .form {
        flex-grow: 1 !important;
    }
    #params .form > :last-child{
        flex-grow: 1;
    }
"""

with gr.Blocks(css=custom_css) as demo:

    with gr.Row(elem_id="centered"):
        with gr.Column(elem_id="params"):
            lora_model = gr.Dropdown(
                label="VADER Model",
                choices=["huggingface-pickscore", "huggingface-hps-aesthetic"],
                value="huggingface-pickscore"
            )
            lora_rank = gr.Slider(minimum=8, maximum=16, label="LoRA Rank", step=8, value=16)
            prompt = gr.Textbox(placeholder="Enter prompt text here", lines=4, label="Text Prompt",
                                value="A brown dog eagerly eats from a bowl in a kitchen.")
            run_btn = gr.Button("Run Inference")

        with gr.Column():
            output_video = gr.Video(elem_id="image-upload")
            
    with gr.Row(elem_id="centered"):
        with gr.Column():
            seed = gr.Slider(minimum=0, maximum=65536, label="Seed", step=1, value=200)

            with gr.Row():
                height = gr.Slider(minimum=0, maximum=512, label="Height", step=16, value=384)
                width = gr.Slider(minimum=0, maximum=512, label="Width", step=16, value=512)

            with gr.Row():
                frames = gr.Slider(minimum=0, maximum=50, label="Frames", step=1, value=24)
                savefps = gr.Slider(minimum=0, maximum=30, label="Save FPS", step=1, value=10)

            with gr.Row():
                DDIM_Steps = gr.Slider(minimum=0, maximum=50, label="DDIM Steps", step=1, value=50)
                unconditional_guidance_scale = gr.Slider(minimum=0, maximum=50, label="Guidance Scale", step=0.1, value=12.0)
                DDIM_Eta = gr.Slider(minimum=0, maximum=1, label="DDIM Eta", step=0.01, value=1.0)

            # Reset button: restore every control to its default value.
            reset_btn = gr.Button("Reset")
            reset_btn.click(fn=reset_fn,
                            outputs=[prompt, seed, height, width, unconditional_guidance_scale,
                                     DDIM_Steps, DDIM_Eta, frames, lora_rank, savefps, lora_model])

            run_btn.click(fn=gradio_main_fn,
                          inputs=[prompt, lora_model, lora_rank,
                                  seed, height, width, unconditional_guidance_scale,
                                  DDIM_Steps, DDIM_Eta, frames, savefps],
                          outputs=output_video)

            lora_model.change(fn=update_lora_rank, inputs=lora_model, outputs=lora_rank)
            lora_rank.change(fn=update_dropdown, inputs=lora_rank, outputs=lora_model)

            # cache_examples="lazy" computes and caches each example's output the
            # first time a user selects it, rather than at startup.
            gr.Examples(examples=examples,
                        inputs=[prompt, lora_model, lora_rank, seed,
                                height, width, unconditional_guidance_scale,
                                DDIM_Steps, DDIM_Eta, frames, savefps],
                        outputs=output_video,
                        fn=gradio_main_fn,
                        run_on_click=False,
                        cache_examples="lazy")

demo.launch(share=True)
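
# share=True requests a temporary public gradio.live URL in addition to the
# local server; plain demo.launch() would serve on localhost only.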