removed states and batches
Browse files — main_noweb.py (+12, −22)
main_noweb.py
CHANGED
@@ -32,9 +32,9 @@ print("[INFO]: Imported modules!")
|
|
32 |
human = MMPoseInferencer("simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192") # simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 dekr_hrnet-w32_8xb10-140e_coco-512x512
|
33 |
hand = MMPoseInferencer("hand")
|
34 |
#model3d = gr.State()
|
35 |
-
human3d = MMPoseInferencer(device=device,
|
36 |
-
|
37 |
-
|
38 |
|
39 |
|
40 |
#"https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py",
|
@@ -99,7 +99,7 @@ def pose3d(video, kpt_threshold):
|
|
99 |
video = check_extension(video)
|
100 |
print(device)
|
101 |
|
102 |
-
|
103 |
|
104 |
print("HUMAN 3d downloaded!!")
|
105 |
# Define new unique folder
|
@@ -109,14 +109,14 @@ def pose3d(video, kpt_threshold):
|
|
109 |
os.makedirs(add_dir)
|
110 |
print(check_fps(video))
|
111 |
#video = human3d.preprocess(video, batch_size=8)
|
112 |
-
result_generator =
|
113 |
vis_out_dir = add_dir,
|
114 |
radius = 8,
|
115 |
thickness = 5,
|
116 |
rebase_keypoint_height=True,
|
117 |
kpt_thr=kpt_threshold,
|
118 |
pred_out_dir = add_dir
|
119 |
-
)
|
120 |
print("INFERENCE DONW")
|
121 |
result = [result for result in result_generator] #next(result_generator)
|
122 |
|
@@ -156,16 +156,6 @@ def pose2d(video, kpt_threshold):
|
|
156 |
|
157 |
return "".join(out_file), "".join(kpoints)
|
158 |
|
159 |
-
def pose3dbatch(video, kpt_threshold):
|
160 |
-
kpoints=[]
|
161 |
-
outvids=[]
|
162 |
-
for v, t in zip(video, kpt_threshold):
|
163 |
-
vname, kname = pose3d(v, t)
|
164 |
-
outvids.append(vname)
|
165 |
-
kpoints.append(kname)
|
166 |
-
return [outvids]#kpoints, outvids
|
167 |
-
|
168 |
-
|
169 |
def pose2dhand(video, kpt_threshold):
|
170 |
video = check_extension(video)
|
171 |
print(device)
|
@@ -320,11 +310,11 @@ print("Angle is: ", angle)
|
|
320 |
outputs = [video_output1, jsonoutput],
|
321 |
queue=True)
|
322 |
|
323 |
-
submit_pose3d_file.click(fn=
|
324 |
inputs= [video_input, file_kpthr],
|
325 |
-
outputs =
|
326 |
-
batch=True,
|
327 |
-
max_batch_size=16,
|
328 |
queue=True) # Sometimes it worked with queue false? But still slow
|
329 |
|
330 |
submit_hand_file.click(fn=pose2dhand,
|
@@ -333,8 +323,8 @@ print("Angle is: ", angle)
|
|
333 |
queue=True)
|
334 |
|
335 |
if __name__ == "__main__":
|
336 |
-
block.queue(max_size=
|
337 |
-
concurrency_count=40, # When you increase the concurrency_count parameter in queue(), max_threads() in launch() is automatically increased as well.
|
338 |
#max_size=25, # Maximum number of requests that the queue processes
|
339 |
api_open = False # When creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the programmatic API that is automatically created for your Gradio demo.
|
340 |
).launch(
|
|
|
32 |
human = MMPoseInferencer("simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192") # simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 dekr_hrnet-w32_8xb10-140e_coco-512x512
|
33 |
hand = MMPoseInferencer("hand")
|
34 |
#model3d = gr.State()
|
35 |
+
# human3d = MMPoseInferencer(device=device,
|
36 |
+
# pose3d="human3d",
|
37 |
+
# scope="mmpose")
|
38 |
|
39 |
|
40 |
#"https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py",
|
|
|
99 |
video = check_extension(video)
|
100 |
print(device)
|
101 |
|
102 |
+
human3d = MMPoseInferencer(device=device, pose3d="human3d", scope="mmpose")#"pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m")
|
103 |
|
104 |
print("HUMAN 3d downloaded!!")
|
105 |
# Define new unique folder
|
|
|
109 |
os.makedirs(add_dir)
|
110 |
print(check_fps(video))
|
111 |
#video = human3d.preprocess(video, batch_size=8)
|
112 |
+
result_generator = human3d(video,
|
113 |
vis_out_dir = add_dir,
|
114 |
radius = 8,
|
115 |
thickness = 5,
|
116 |
rebase_keypoint_height=True,
|
117 |
kpt_thr=kpt_threshold,
|
118 |
pred_out_dir = add_dir
|
119 |
+
)
|
120 |
print("INFERENCE DONW")
|
121 |
result = [result for result in result_generator] #next(result_generator)
|
122 |
|
|
|
156 |
|
157 |
return "".join(out_file), "".join(kpoints)
|
158 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
159 |
def pose2dhand(video, kpt_threshold):
|
160 |
video = check_extension(video)
|
161 |
print(device)
|
|
|
310 |
outputs = [video_output1, jsonoutput],
|
311 |
queue=True)
|
312 |
|
313 |
+
submit_pose3d_file.click(fn=pose3d,
|
314 |
inputs= [video_input, file_kpthr],
|
315 |
+
outputs = [video_output2, jsonoutput],
|
316 |
+
#batch=True,
|
317 |
+
#max_batch_size=16,
|
318 |
queue=True) # Sometimes it worked with queue false? But still slow
|
319 |
|
320 |
submit_hand_file.click(fn=pose2dhand,
|
|
|
323 |
queue=True)
|
324 |
|
325 |
if __name__ == "__main__":
|
326 |
+
block.queue(max_size=20,
|
327 |
+
#concurrency_count=40, # When you increase the concurrency_count parameter in queue(), max_threads() in launch() is automatically increased as well.
|
328 |
#max_size=25, # Maximum number of requests that the queue processes
|
329 |
api_open = False # When creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the programmatic API that is automatically created for your Gradio demo.
|
330 |
).launch(
|