# Pose inferencing
from mmpose.apis import MMPoseInferencer

import torch

# Gradio
import gradio as gr
import moviepy.editor as moviepy

# System and files
import os
import glob
import uuid

# Image manipulation
import cv2

print(torch.__version__)
# Use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

os.system("nvidia-smi")

print("[INFO]: Imported modules!")
# Lightweight 2D body model; a heavier alternative is
# "dekr_hrnet-w32_8xb10-140e_coco-512x512". The device is set once at
# construction rather than moved afterwards (MMPoseInferencer has no .to()).
human = MMPoseInferencer("simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192", device=device)
hand = MMPoseInferencer("hand", device=device)

# 3D alternatives from the MMPose model zoo:
# https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py
# https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth
# https://github.com/open-mmlab/mmpose/tree/main/configs/hand_2d_keypoint/topdown_regression

print("[INFO]: Downloaded models!")

def check_fps(video):
    # Read frame count and frame rate straight from the container via OpenCV
    cap = cv2.VideoCapture(video)
    nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    return nframes, fps
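
# Example (hypothetical file): check_fps("clip.mp4") might return (250.0, 25.0);
# OpenCV reports both values as floats.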


def get_frames(video, fps=50):
    # Reject clips longer than 10 seconds; convert to mp4 and cap the frame rate at `fps`
    clip = moviepy.VideoFileClip(video)
    print(clip.duration)
    if clip.duration > 10:
        raise gr.Error("Please provide or record a video shorter than 10 seconds...")

    file_name, file_extension = os.path.splitext(video)

    needs_conversion = file_extension != ".mp4"
    needs_resampling = clip.fps > fps
    if needs_conversion or needs_resampling:
        if needs_conversion:
            print("Converting to mp4")
        if needs_resampling:
            print(f"Video rate is over {fps}, resetting to {fps}")
        video = file_name + ".mp4"
        clip.write_videofile(video, fps=min(clip.fps, fps))
    else:
        print("Video rate is OK")

    return video

def check_extension(video):
    # Extract the file name and extension
    file_name, file_extension = os.path.splitext(video)

    if file_extension != ".mp4":
        print("Converting to mp4")
        clip = moviepy.VideoFileClip(video)
        video = file_name + ".mp4"
        clip.write_videofile(video)

    return video
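
# Example (hypothetical file): check_extension("clip.webm") re-encodes the clip
# and returns "clip.mp4"; an existing .mp4 path is returned unchanged.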


def pose3d(video, kpt_threshold):
    video = check_extension(video)
    print(device)

    # The 3D model is loaded per request; device is set at construction
    human3d = MMPoseInferencer(pose3d="human3d", device=device)

    # Define a new unique output folder
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)

    print(check_fps(video))
    result_generator = human3d(video,
                               vis_out_dir=add_dir,
                               radius=5,
                               thickness=4,
                               rebase_keypoint_height=True,
                               kpt_thr=kpt_threshold,
                               pred_out_dir=add_dir)

    # Drain the generator so every frame is processed and written to disk
    _ = list(result_generator)

    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)

    return "".join(out_file), "".join(kpoints)


def pose2d(video, kpt_threshold):
    # Validate length, convert to mp4 and cap the frame rate
    video = get_frames(video)

    # Define a new unique output folder
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)

    print(check_fps(video))
    result_generator = human(video,
                             vis_out_dir=add_dir,
                             radius=5,
                             thickness=4,
                             rebase_keypoint_height=True,
                             kpt_thr=kpt_threshold,
                             pred_out_dir=add_dir)

    # Drain the generator so every frame is processed and written to disk
    _ = list(result_generator)

    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)

    return "".join(out_file), "".join(kpoints)

def pose2dbatch(video, kpt_threshold):
    # The upload tab passes a single file path; accept a list of paths as well
    videos = video if isinstance(video, list) else [video]
    outvids = []
    kpoints = []
    for v in videos:
        vname, kname = pose2d(v, kpt_threshold)
        outvids.append(vname)
        kpoints.append(kname)
    # Return in the (video, json) order expected by the click-handler outputs
    if len(videos) == 1:
        return outvids[0], kpoints[0]
    return outvids, kpoints


def pose2dhand(video, kpt_threshold):
    video = check_extension(video)
    print(device)

    # Define a new unique output folder
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)

    result_generator = hand(video,
                            vis_out_dir=add_dir,
                            thickness=4,
                            radius=5,
                            rebase_keypoint_height=True,
                            kpt_thr=kpt_threshold,
                            pred_out_dir=add_dir)

    # Drain the generator so every frame is processed and written to disk
    _ = list(result_generator)

    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))

    return "".join(out_file), "".join(kpoints)

def UI():
    block = gr.Blocks()
    with block:
        with gr.Column():       
            with gr.Tab("Upload video"):
                with gr.Column():
                    with gr.Row():
                        with gr.Column():
                            with gr.Row():
                                video_input = gr.Video(source="upload", type="filepath", height=512, width=512)
                                # Insert slider with kpt_thr
                                with gr.Column():
                                    gr.Markdown("Drag the keypoint threshold to filter out lower probability keypoints:")
                                    file_kpthr = gr.Slider(0, 1, value=0.3, label='Keypoint threshold')
                            with gr.Row():
                                submit_pose_file = gr.Button("Run 2D pose estimation")
                                submit_pose3d_file = gr.Button("Run 3D pose estimation")
                                submit_hand_file = gr.Button("Run 2D hand estimation")

                    with gr.Row():
                        video_output1 = gr.PlayableVideo(height=512, label="Estimated 2D human poses", show_label=True)
                        video_output2 = gr.PlayableVideo(height=512, label="Estimated 3D human poses", show_label=True)
                        video_output3 = gr.PlayableVideo(height=512, label="Estimated 2D hand poses", show_label=True)
                    
                    gr.Markdown("Download the .json file that contains the keypoint positions for each frame in the video.")
                    jsonoutput = gr.File(file_types=[".json"])
                    gr.Markdown("""There are multiple ways to interact with these keypoints. 
                                \n The example below shows how you can calulate the angle on the elbow for example.
                                \n Copy the code into your own preferred interpreter and experiment with the keypoint file. 
                                \n If you choose to run the code, start by installing the packages json and numpy. The complete overview of the keypoint indices can be seen in the tab 'General information'. """)
                    gr.Code(
                        value="""
                        # Import the packages needed
                        import json
                        import numpy as np

                        # First we load the data.
                        # Set file_path to the location of the .json file you downloaded above.
                        file_path = "keypoints.json"
                        with open(file_path, 'r') as json_file:
                            data = json.load(json_file)

                        # Then we define a function for calculating angles
                        def calculate_angle(a, b, c):
                            a = np.array(a) # First point
                            b = np.array(b) # Middle point
                            c = np.array(c) # End point

                            radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])
                            angle = np.abs(radians*180.0/np.pi)

                            if angle > 180.0:
                                angle = 360 - angle

                            return angle

                        # We select the first frame (index 0) and the first identified person
                        # in it as an example. To calculate the angle of the right elbow (8) we
                        # take the points on either side of it: according to the indices those
                        # are 6 (right shoulder) and 10 (right wrist).

                        # COCO keypoint indices
                        shoulder_index = 6
                        elbow_index = 8
                        wrist_index = 10

                        keypoints = data[0]['instances'][0]['keypoints']
                        shoulder_point = keypoints[shoulder_index]
                        elbow_point = keypoints[elbow_index]
                        wrist_point = keypoints[wrist_index]

                        angle = calculate_angle(shoulder_point, elbow_point, wrist_point)
                        print("Angle is: ", angle)
                        """,
                        language="python",
                        interactive=False,
                        show_label=False,
                    )



            with gr.Tab("General information"):
                gr.Markdown(""" 
                            \n # Information about the models 

                            \n ## Pose models: 
                            
                            \n All the pose estimation models come from the library [MMpose](https://github.com/open-mmlab/mmpose). It is a library for human pose estimation that provides pre-trained models for 2D and 3D pose estimation. 

                            \n The 2D pose model is used for estimating the 2D coordinates of human body joints from an image or a video frame. The model uses a convolutional neural network (CNN) to predict the joint locations and their confidence scores. 
                            
                            \n The 2D hand model is a specialized version of the 2D pose model that is designed for hand pose estimation. It uses a similar CNN architecture to the 2D pose model but is trained specifically for detecting the joints in the hand. 
                            
                            \n The 3D pose model is used for estimating the 3D coordinates of human body joints from an image or a video frame. The model uses a combination of 2D pose estimation and depth estimation to infer the 3D joint locations. 
                            
                            \n The keypoints in the 2D pose model have the following order:

                            \n ``` 
                            0: Nose
                            1: Left Eye
                            2: Right Eye
                            3: Left Ear
                            4: Right Ear
                            5: Left Shoulder
                            6: Right Shoulder
                            7: Left Elbow
                            8: Right Elbow
                            9: Left Wrist
                            10: Right Wrist
                            11: Left Hip
                            12: Right Hip
                            13: Left Knee
                            14: Right Knee
                            15: Left Ankle
                            16: Right Ankle 
                            ```
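                            \n To map an index to a name in your own script, you can mirror this order in a list (a minimal sketch; the name COCO_KEYPOINTS is just illustrative):

                            \n ```python
                            COCO_KEYPOINTS = [
                                "nose", "left_eye", "right_eye", "left_ear", "right_ear",
                                "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
                                "left_wrist", "right_wrist", "left_hip", "right_hip",
                                "left_knee", "right_knee", "left_ankle", "right_ankle",
                            ]
                            print(COCO_KEYPOINTS[8])  # -> "right_elbow"
                            ```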
                            """)

                

            # From file
            submit_pose_file.click(fn=pose2dbatch, 
                                inputs=  [video_input, file_kpthr], 
                                outputs = [video_output1, jsonoutput],
                                queue=True)
            
            submit_pose3d_file.click(fn=pose3d, 
                                    inputs= [video_input, file_kpthr], 
                                    outputs = [video_output2, jsonoutput],
                                    queue=True)
            
            submit_hand_file.click(fn=pose2dhand, 
                                inputs= [video_input, file_kpthr], 
                                outputs = [video_output3, jsonoutput],
                                queue=True)
    return block

if __name__ == "__main__":
    block = UI()
    block.queue(max_size=50,  # Maximum number of requests the queue holds
                concurrency_count=20,  # Increasing concurrency_count in queue() automatically raises max_threads in launch()
                api_open=False,  # Restrict all traffic to the UI rather than the auto-generated programmatic API
                ).launch(server_name="0.0.0.0",
                         server_port=7860,
                         auth=("novouser", "bstad2023"))

    # The total concurrency = number of processors * 10.
    # 4 vCPU, 15 GB RAM, 40 GB VRAM = 40?