ClaireOzzz committed on
Commit 8e4a774
Parent: 59c4941

Update app.py

Files changed (1)
  1. app.py +12 -311
app.py CHANGED
@@ -6,225 +6,9 @@ import requests
 import subprocess
 from subprocess import getoutput
 from huggingface_hub import login, HfFileSystem, snapshot_download, HfApi, create_repo
-#api = HfApi()
-
-#hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
-
-#is_shared_ui = True if "fffiloni/train-dreambooth-lora-sdxl" in os.environ['SPACE_ID'] else False
-
-is_gpu_associated = torch.cuda.is_available()
-
-is_shared_ui = False
-
-hf_token = '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'
-
-fs = HfFileSystem(token=hf_token)
-api = HfApi()
-
-if is_gpu_associated:
-    gpu_info = getoutput('nvidia-smi')
-    if("A10G" in gpu_info):
-        which_gpu = "A10G"
-    elif("T4" in gpu_info):
-        which_gpu = "T4"
-    else:
-        which_gpu = "CPU"
-
-def check_upload_or_no(value):
-    if value is True:
-        return gr.update(visible=True)
-    else:
-        return gr.update(visible=False)
-
-def load_images_to_dataset(images, dataset_name):
-
-    if is_shared_ui:
-        raise gr.Error("This Space only works in duplicated instances")
-
-    if dataset_name == "":
-        raise gr.Error("You forgot to name your new dataset. ")
-
-    # Create the directory if it doesn't exist
-    my_working_directory = f"my_working_directory_for_{dataset_name}"
-    if not os.path.exists(my_working_directory):
-        os.makedirs(my_working_directory)
-
-    # Assuming 'images' is a list of image file paths
-    for idx, image in enumerate(images):
-        # Get the base file name (without path) from the original location
-        image_name = os.path.basename(image.name)
-
-        # Construct the destination path in the working directory
-        destination_path = os.path.join(my_working_directory, image_name)
-
-        # Copy the image from the original location to the working directory
-        shutil.copy(image.name, destination_path)
-
-        # Print the image name and its corresponding save path
-        print(f"Image {idx + 1}: {image_name} copied to {destination_path}")
-
-    path_to_folder = my_working_directory
-    your_username = api.whoami(token=hf_token)["name"]
-    repo_id = f"{your_username}/{dataset_name}"
-    create_repo(repo_id=repo_id, repo_type="dataset", token=hf_token)
-
-    api.upload_folder(
-        folder_path=path_to_folder,
-        repo_id=repo_id,
-        repo_type="dataset",
-        token=hf_token
-    )
-
-    return "Done, your dataset is ready and loaded for the training step!", repo_id
-
-def swap_hardware(hf_token, hardware="cpu-basic"):
-    hardware_url = f"https://huggingface.co/spaces/ClaireOzzz/train-dreambooth-lora-sdxl/hardware"
-    headers = { "authorization" : f"Bearer {hf_token}"}
-    body = {'flavor': hardware}
-    requests.post(hardware_url, json = body, headers=headers)
-
-def swap_sleep_time(hf_token,sleep_time):
-    sleep_time_url = f"https://huggingface.co/api/spaces/ClaireOzzz/train-dreambooth-lora-sdxl/sleeptime"
-    headers = { "authorization" : f"Bearer {hf_token}"}
-    body = {'seconds':sleep_time}
-    requests.post(sleep_time_url,json=body,headers=headers)
-
-def get_sleep_time(hf_token):
-    sleep_time_url = f"https://huggingface.co/api/spaces/ClaireOzzz/train-dreambooth-lora-sdxl"
-    headers = { "authorization" : f"Bearer {hf_token}"}
-    response = requests.get(sleep_time_url,headers=headers)
-    try:
-        gcTimeout = response.json()['runtime']['gcTimeout']
-    except:
-        gcTimeout = None
-    return gcTimeout
-
-def write_to_community(title, description,hf_token):
-
-    api.create_discussion(repo_id=os.environ['ClaireOzzz/train-dreambooth-lora-sdxl'], title=title, description=description,repo_type="space", token=hf_token)
-
-
-def set_accelerate_default_config():
-    try:
-        subprocess.run(["accelerate", "config", "default"], check=True)
-        print("Accelerate default config set successfully!")
-    except subprocess.CalledProcessError as e:
-        print(f"An error occurred: {e}")
-
-def train_dreambooth_lora_sdxl(dataset_id, instance_data_dir, lora_trained_xl_folder, instance_prompt, max_train_steps, checkpoint_steps, remove_gpu):
-
-    script_filename = "train_dreambooth_lora_sdxl.py" # Assuming it's in the same folder
-
-    command = [
-        "accelerate",
-        "launch",
-        script_filename, # Use the local script
-        "--pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
-        "--pretrained_vae_model_name_or_path=madebyollin/sdxl-vae-fp16-fix",
-        f"--dataset_id={dataset_id}",
-        f"--instance_data_dir={instance_data_dir}",
-        f"--output_dir={lora_trained_xl_folder}",
-        "--mixed_precision=fp16",
-        f"--instance_prompt={instance_prompt}",
-        "--resolution=1024",
-        "--train_batch_size=2",
-        "--gradient_accumulation_steps=2",
-        "--gradient_checkpointing",
-        "--learning_rate=1e-4",
-        "--lr_scheduler=constant",
-        "--lr_warmup_steps=0",
-        "--enable_xformers_memory_efficient_attention",
-        "--mixed_precision=fp16",
-        "--use_8bit_adam",
-        f"--max_train_steps={max_train_steps}",
-        f"--checkpointing_steps={checkpoint_steps}",
-        "--seed=0",
-        "--push_to_hub",
-        f"--hub_token={hf_token}"
-    ]
-
-    try:
-        subprocess.run(command, check=True)
-        print("Training is finished!")
-        if remove_gpu:
-            swap_hardware(hf_token, "cpu-basic")
-        else:
-            swap_sleep_time(hf_token, 300)
-    except subprocess.CalledProcessError as e:
-        print(f"An error occurred: {e}")
-
-        title="There was an error on during your training"
-        description=f'''
-        Unfortunately there was an error during training your {lora_trained_xl_folder} model.
-        Please check it out below. Feel free to report this issue to [SD-XL Dreambooth LoRa Training](https://huggingface.co/spaces/fffiloni/train-dreambooth-lora-sdxl):
-        ```
-        {str(e)}
-        ```
-        '''
-        if remove_gpu:
-            swap_hardware(hf_token, "cpu-basic")
-        else:
-            swap_sleep_time(hf_token, 300)
-        #write_to_community(title,description,hf_token)
-
-def main(dataset_id,
-         lora_trained_xl_folder,
-         instance_prompt,
-         max_train_steps,
-         checkpoint_steps,
-         remove_gpu):
-
-
-    if is_shared_ui:
-        raise gr.Error("This Space only works in duplicated instances")
-
-    if not is_gpu_associated:
-        raise gr.Error("Please associate a T4 or A10G GPU for this Space")
-
-    if dataset_id == "":
-        raise gr.Error("You forgot to specify an image dataset")
-
-    if instance_prompt == "":
-        raise gr.Error("You forgot to specify a concept prompt")
-
-    if lora_trained_xl_folder == "":
-        raise gr.Error("You forgot to name the output folder for your model")
-
-    sleep_time = get_sleep_time(hf_token)
-    if sleep_time:
-        swap_sleep_time(hf_token, -1)
-
-    gr.Warning("If you did not check the `Remove GPU After training`, don't forget to remove the GPU attribution after you are done. ")
-
-    dataset_repo = dataset_id
-
-    # Automatically set local_dir based on the last part of dataset_repo
-    repo_parts = dataset_repo.split("/")
-    local_dir = f"./{repo_parts[-1]}" # Use the last part of the split
-
-    # Check if the directory exists and create it if necessary
-    if not os.path.exists(local_dir):
-        os.makedirs(local_dir)
-
-    gr.Info("Downloading dataset ...")
-
-    snapshot_download(
-        dataset_repo,
-        local_dir=local_dir,
-        repo_type="dataset",
-        ignore_patterns=".gitattributes",
-        token=hf_token
-    )
-
-    set_accelerate_default_config()
-
-    gr.Info("Training begins ...")
-
-    instance_data_dir = repo_parts[-1]
-    train_dreambooth_lora_sdxl(dataset_id, instance_data_dir, lora_trained_xl_folder, instance_prompt, max_train_steps, checkpoint_steps, remove_gpu)
-
-    your_username = api.whoami(token=hf_token)["name"]
-    return f"Done, your trained model has been stored in your models library: {your_username}/{lora_trained_xl_folder}"
 
+from app_train import create_training_demo
+from sdxl.app_inference import create_inference_demo
 
 css="""
 #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
@@ -295,99 +79,16 @@ min-height: 60px;
 """
 
 with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        if is_shared_ui:
-            top_description = gr.HTML(f'''
-                <div class="gr-prose">
-                <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                Attention: this Space need to be duplicated to work</h2>
-                <p class="main-message">
-                    To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (T4-small or A10G-small).<br />
-                    A T4 costs <strong>US$0.60/h</strong>, so it should cost < US$1 to train most models.
-                </p>
-                <p class="actions">
-
-                    to start training your own image model
-                </p>
-                </div>
-            ''', elem_id="warning-duplicate")
-        else:
-            if(is_gpu_associated):
-                top_description = gr.HTML(f'''
-                    <div class="gr-prose">
-                    <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                    You have successfully associated a {which_gpu} GPU to the SD-XL Training Space 🎉</h2>
-                    <p>
-                    You can now train your model! You will be billed by the minute from when you activated the GPU until when it is turned off.
-                    </p>
-                    </div>
-                ''', elem_id="warning-ready")
-            else:
-                top_description = gr.HTML(f'''
-                    <div class="gr-prose">
-                    <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                    You have successfully duplicated the SD-XL Training Space 🎉</h2>
-                    <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below.
-                    You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
-                    <p class="actions">
-                        <a href="https://huggingface.co/spaces/ClaireOzzz/train-dreambooth-lora-sdxl/settings">🔥 &nbsp; Set recommended GPU</a>
-                    </p>
-                    </div>
-                ''', elem_id="warning-setgpu")
-
-        gr.Markdown("# SD-XL Dreambooth LoRa Training UI 💭")
-
-        upload_my_images = gr.Checkbox(label="Drop your training images ? (optional)", value=False)
-        gr.Markdown("Use this step to upload your training images and create a new dataset. If you already have a dataset stored on your HF profile, you can skip this step, and provide your dataset ID in the training `Datased ID` input below.")
-
-        with gr.Group(visible=False, elem_id="upl-dataset-group") as upload_group:
-            with gr.Row():
-                images = gr.File(file_types=["image"], label="Upload your images", file_count="multiple", interactive=True, visible=True)
-                with gr.Column():
-                    new_dataset_name = gr.Textbox(label="Set new dataset name", placeholder="e.g.: my_awesome_dataset")
-                    dataset_status = gr.Textbox(label="dataset status")
-            load_btn = gr.Button("Load images to new dataset", elem_id="load-dataset-btn")
-
-        gr.Markdown("## Training ")
-        gr.Markdown("You can use an existing image dataset, find a dataset example here: [https://huggingface.co/datasets/diffusers/dog-example](https://huggingface.co/datasets/diffusers/dog-example) ;)")
-
-        with gr.Row():
-            dataset_id = gr.Textbox(label="Dataset ID", info="use one of your previously uploaded image datasets on your HF profile", placeholder="diffusers/dog-example")
-            instance_prompt = gr.Textbox(label="Concept prompt", info="concept prompt - use a unique, made up word to avoid collisions")
-
-        with gr.Row():
-            model_output_folder = gr.Textbox(label="Output model folder name", placeholder="lora-trained-xl-folder")
-            max_train_steps = gr.Number(label="Max Training Steps", value=500, precision=0, step=10)
-            checkpoint_steps = gr.Number(label="Checkpoints Steps", value=100, precision=0, step=10)
-
-        remove_gpu = gr.Checkbox(label="Remove GPU After Training", value=True, info="If NOT enabled, don't forget to remove the GPU attribution after you are done.")
-        train_button = gr.Button("Train !")
-
-        train_status = gr.Textbox(label="Training status")
-
-    upload_my_images.change(
-        fn = check_upload_or_no,
-        inputs =[upload_my_images],
-        outputs = [upload_group]
-    )
-
-    load_btn.click(
-        fn = load_images_to_dataset,
-        inputs = [images, new_dataset_name],
-        outputs = [dataset_status, dataset_id]
-    )
-
-    train_button.click(
-        fn = main,
-        inputs = [
-            dataset_id,
-            model_output_folder,
-            instance_prompt,
-            max_train_steps,
-            checkpoint_steps,
-            remove_gpu
-        ],
-        outputs = [train_status]
-    )
-
-demo.launch(debug=True, share=True)
+    gr.Markdown("SUTD x SUNS Shop Design Generator")
+    with gr.Tab("Training"):
+        create_training_demo()
+    with gr.Tab("Generation"):
+        create_inference_demo()
+    with gr.Tab("Visualisation"):
+        gr.Markdown('''
+        - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
+        ''')
 
+demo.queue(max_size=1).launch(debug=True, share=True)
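
For reference, the added lines above assemble into a small tabbed Gradio app that delegates to `app_train.create_training_demo()` and `sdxl.app_inference.create_inference_demo()`. The sketch below is a minimal reconstruction of the new app.py from the `+` lines of this diff only: the `import gradio as gr` statement (presumably among the file's first five lines, which the hunk does not show) is assumed, the unchanged ~70-line css block is abbreviated to its first rule, and the retained-but-now-unused imports (`subprocess`, `huggingface_hub`, etc.) are omitted.

```python
# Minimal sketch of app.py after this commit, assembled from the "+" lines above.
# Assumptions: app_train.py and sdxl/app_inference.py live in the same Space and
# build their own Gradio components; the real css string is much longer.
import gradio as gr

from app_train import create_training_demo
from sdxl.app_inference import create_inference_demo

css = """
#col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("SUTD x SUNS Shop Design Generator")
    with gr.Tab("Training"):
        create_training_demo()      # training UI defined in app_train.py
    with gr.Tab("Generation"):
        create_inference_demo()     # inference UI defined in sdxl/app_inference.py
    with gr.Tab("Visualisation"):
        gr.Markdown('''
        - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
        ''')

# queue(max_size=1) caps the request queue at a single waiting job
demo.queue(max_size=1).launch(debug=True, share=True)
```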