xmrt committed on
Commit
fc47c19
1 Parent(s): e128b40
Files changed (1) hide show
  1. main.py +12 -11
main.py CHANGED
@@ -22,6 +22,12 @@ import uuid
22
  import numpy as np
23
  import cv2
24
 
 
 
 
 
 
 
25
  print("[INFO]: Imported modules!")
26
  human = MMPoseInferencer("human")
27
  hand = MMPoseInferencer("hand")
@@ -29,11 +35,6 @@ human3d = MMPoseInferencer(pose3d="human3d")
29
  track_model = YOLO('yolov8n.pt') # Load an official Detect model
30
 
31
 
32
- if torch.cuda.is_available():
33
- device = "cuda"
34
- else:
35
- device = "cpu"
36
-
37
  print("[INFO]: Downloaded models!")
38
 
39
  def check_extension(video):
@@ -114,11 +115,11 @@ def pose3d(video):
114
  os.makedirs(vis_out_dir)
115
 
116
  result_generator = human3d(video,
117
- vis_out_dir = vis_out_dir,
118
- thickness=2,
119
- return_vis=True,
120
- rebase_keypoint_height=True,
121
- device=device)
122
 
123
  result = [result for result in result_generator] #next(result_generator)
124
 
@@ -231,7 +232,7 @@ def run_UI():
231
 
232
  gr.Markdown("""
233
  \n # Information about the models
234
-
235
  \n ## Pose models: All the pose estimation models comes from the library [MMpose](https://github.com/open-mmlab/mmpose). It is a library for human pose estimation that provides pre-trained models for 2D and 3D pose estimation.
236
 
237
  \n ### The 2D pose model is used for estimating the 2D coordinates of human body joints from an image or a video frame. The model uses a convolutional neural network (CNN) to predict the joint locations and their confidence scores.
 
22
  import numpy as np
23
  import cv2
24
 
25
+ # Use GPU if available
26
+ if torch.cuda.is_available():
27
+ device = torch.device("cuda")
28
+ else:
29
+ device = torch.device("cpu")
30
+
31
  print("[INFO]: Imported modules!")
32
  human = MMPoseInferencer("human")
33
  hand = MMPoseInferencer("hand")
 
35
  track_model = YOLO('yolov8n.pt') # Load an official Detect model
36
 
37
 
 
 
 
 
 
38
  print("[INFO]: Downloaded models!")
39
 
40
  def check_extension(video):
 
115
  os.makedirs(vis_out_dir)
116
 
117
  result_generator = human3d(video,
118
+ vis_out_dir = vis_out_dir,
119
+ thickness=2,
120
+ return_vis=True,
121
+ rebase_keypoint_height=True,
122
+ device=device)
123
 
124
  result = [result for result in result_generator] #next(result_generator)
125
 
 
232
 
233
  gr.Markdown("""
234
  \n # Information about the models
235
+
236
  \n ## Pose models: All the pose estimation models comes from the library [MMpose](https://github.com/open-mmlab/mmpose). It is a library for human pose estimation that provides pre-trained models for 2D and 3D pose estimation.
237
 
238
  \n ### The 2D pose model is used for estimating the 2D coordinates of human body joints from an image or a video frame. The model uses a convolutional neural network (CNN) to predict the joint locations and their confidence scores.