xmrt committed on
Commit
91f9212
1 Parent(s): 7a6038c

Added ultralytics

Browse files
Files changed (2) hide show
  1. main.py +64 -22
  2. requirements.txt +2 -1
main.py CHANGED
@@ -3,7 +3,7 @@ import mmpose
3
  from mmpose.apis import MMPoseInferencer
4
 
5
  # Ultralytics
6
- #from ultralytics import YOLO
7
 
8
  # Gradio
9
  import gradio as gr
@@ -26,7 +26,7 @@ human3d = MMPoseInferencer(pose3d="human3d")
26
  # Defining inferencer models to lookup in function
27
  inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
28
 
29
- #track_model = YOLO('yolov8n.pt') # Load an official Detect model
30
 
31
  print("[INFO]: Downloaded models!")
32
 
@@ -43,49 +43,87 @@ def tracking(video, model, boxes=True):
43
 
44
 
45
 
46
- def poses(photo, check):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  # Selecting the specific inferencer
48
  out_files=[]
 
49
  for i in check:
 
 
50
  inferencer = inferencers[i] # 'hand', 'human , device='cuda'
51
 
52
  print("[INFO]: Running inference!")
53
- # Create out directory
54
- vis_out_dir = str(uuid.uuid4())
 
55
 
56
- result_generator = inferencer(photo,
57
- vis_out_dir = vis_out_dir,
58
- return_vis=True,
59
- thickness=2,
60
- rebase_keypoint_height=True)
61
-
62
- result = [result for result in result_generator] #next(result_generator)
63
-
64
- out_file = glob.glob(os.path.join(vis_out_dir, "*.mp4"))
65
- # 00000.mp4
66
- # 000000.mp4
67
  out_files.append(out_file)
68
 
69
  return out_files
70
 
71
  def run():
72
  #https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
73
- check_web = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
74
- check_file = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"], label="Methods", type="value", info="Select the model(s) you want")
 
75
 
76
  webcam = gr.Interface(
77
- fn=poses,
78
  inputs= [gr.Video(source="webcam", height=412), check_web],
79
- outputs = [gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo()],
80
  title = 'Pose estimation',
81
  description = 'Pose estimation on video',
82
  allow_flagging=False
83
  )
84
 
85
  file = gr.Interface(
86
- poses,
87
  inputs = [gr.Video(source="upload", height=412), check_file],
88
- outputs = [gr.PlayableVideo(),gr.PlayableVideo(),gr.PlayableVideo()],
89
  allow_flagging=False
90
  )
91
 
@@ -108,3 +146,7 @@ if __name__ == "__main__":
108
  # videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
109
  # videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
110
  # https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py
 
 
 
 
 
3
  from mmpose.apis import MMPoseInferencer
4
 
5
  # Ultralytics
6
+ from ultralytics import YOLO
7
 
8
  # Gradio
9
  import gradio as gr
 
26
  # Defining inferencer models to lookup in function
27
  inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
28
 
29
+ track_model = YOLO('yolov8n.pt') # Load an official Detect model
30
 
31
  print("[INFO]: Downloaded models!")
32
 
 
43
 
44
 
45
 
46
def show_tracking(video_content, vis_out_dir):
    """Run YOLO detection+tracking on a video and save the annotated MP4.

    Parameters
    ----------
    video_content : str
        Path to the input video file.
    vis_out_dir : str
        Directory where the output video ("track.mp4") is written.

    Returns
    -------
    str
        Path of the written tracking video.
    """
    # The capture is only used to read the source FPS for the writer.
    video = cv2.VideoCapture(video_content)
    try:
        # Run the module-level tracking helper with the YOLO track model.
        video_track = tracking(video_content, track_model.track)

        # The per-run uuid directory may not exist yet when tracking is the
        # only selected method — create it before writing into it.
        os.makedirs(vis_out_dir, exist_ok=True)
        out_file = os.path.join(vis_out_dir, "track.mp4")

        # Guard: no frames tracked -> nothing to write.
        if not video_track:
            return out_file

        fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # codec for MP4 output
        fps = video.get(cv2.CAP_PROP_FPS)

        # Frame size taken from the first tracked frame's original image
        # (a BGR numpy array per ultralytics Results.orig_img).
        height, width, _ = video_track[0][0].orig_img.shape
        size = (width, height)

        out_track = cv2.VideoWriter(out_file, fourcc, fps, size)
        try:
            for frame_track in video_track:
                # plot() renders boxes/ids onto a BGR numpy ndarray.
                result_track = frame_track[0].plot()
                out_track.write(result_track)
        finally:
            # Always release the writer so the MP4 is finalized on disk.
            out_track.release()
    finally:
        # Always release the capture, even if tracking/writing raised.
        video.release()

    cv2.destroyAllWindows()  # no-op when running headless

    return out_file
75
+
76
+
77
def poses(inferencer, video, vis_out_dir):
    """Run a pose inferencer on a video and collect the rendered MP4 paths.

    Parameters
    ----------
    inferencer : callable
        An MMPoseInferencer-like callable; invoked with the video path plus
        visualisation keyword arguments and returning a result generator.
    video : str
        Path to the input video.
    vis_out_dir : str
        Directory the inferencer writes its visualisation video into.

    Returns
    -------
    list[str]
        Paths of the .mp4 files produced in ``vis_out_dir`` (empty if none).
    """
    result_generator = inferencer(video,
                                  vis_out_dir=vis_out_dir,
                                  return_vis=True,
                                  thickness=2,
                                  rebase_keypoint_height=True)

    # Inference runs lazily inside the generator; exhaust it for its side
    # effect (writing the visualisation video). The per-frame results are
    # not needed here, so don't materialize them into a list.
    for _ in result_generator:
        pass

    return glob.glob(os.path.join(vis_out_dir, "*.mp4"))
89
+
90
def infer(video, check):
    """Run every selected method on a video and collect the output files.

    Parameters
    ----------
    video : str
        Path to the input video (from the Gradio Video component).
    check : list[str]
        Selected method labels from the CheckboxGroup.

    Returns
    -------
    list
        One output-file entry per selected method, in selection order.
    """
    out_files = []

    for method in check:
        # Each run writes into its own unique directory so outputs never clash.
        vis_out_dir = str(uuid.uuid4())

        print("[INFO]: Running inference!")
        if method == "Detect and track":
            # BUGFIX: "Detect and track" has no entry in `inferencers`; the
            # previous code looked it up first and raised KeyError whenever
            # this option was selected. Route it to the YOLO tracking path
            # and keep its output file instead of discarding it.
            out_file = show_tracking(video, vis_out_dir)
        else:
            inferencer = inferencers[method]  # e.g. 2d/3d/hand pose model
            out_file = poses(inferencer, video, vis_out_dir)

        out_files.append(out_file)

    return out_files
107
 
108
  def run():
109
  #https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
110
+ check_web = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses", "Detect and track"], label="Methods", type="value", info="Select the model(s) you want")
111
+ check_file = gr.CheckboxGroup(choices = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses", "Detect and track"], label="Methods", type="value", info="Select the model(s) you want")
112
+
113
 
114
  webcam = gr.Interface(
115
+ fn=infer,
116
  inputs= [gr.Video(source="webcam", height=412), check_web],
117
+ outputs = [gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo()],
118
  title = 'Pose estimation',
119
  description = 'Pose estimation on video',
120
  allow_flagging=False
121
  )
122
 
123
  file = gr.Interface(
124
+ infer,
125
  inputs = [gr.Video(source="upload", height=412), check_file],
126
+ outputs = [gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo()],
127
  allow_flagging=False
128
  )
129
 
 
146
  # videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
147
  # videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
148
  # https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py
149
+
150
+
151
+ # 00000.mp4
152
+ # 000000.mp4
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  gradio
2
  numpy
3
- opencv-python
 
 
1
  gradio
2
  numpy
3
+ opencv-python
4
+ ultralytics