# MMpose / main_noweb.py
# Pose inferencing
import mmpose
from mmpose.apis import MMPoseInferencer
import torch
# Gradio
import gradio as gr
import moviepy.editor as moviepy
# System and files
import os
import glob
import uuid
import json
# Image manipulation
import numpy as np
import cv2
#import ffmpeg
print(torch.__version__)
# Use GPU if available
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
os.system("nvidia-smi")
print("[INFO]: Imported modules!")
human = MMPoseInferencer("simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192") # simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 dekr_hrnet-w32_8xb10-140e_coco-512x512
hand = MMPoseInferencer("hand")
#"https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py",
#"https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth") # pose3d="human3d"
#https://github.com/open-mmlab/mmpose/tree/main/configs/hand_2d_keypoint/topdown_regression
print("[INFO]: Downloaded models!")
def check_fps(video):
    cap = cv2.VideoCapture(video)
    nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    return nframes, fps
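
# Usage sketch: OpenCV returns both values as floats, e.g.
#   nframes, fps = check_fps("clip.mp4")  # "clip.mp4" is a placeholder path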
def get_frames(video, fps=50, height=512, width=512):
    clip = moviepy.VideoFileClip(video)
    print(clip.duration)
    if clip.duration > 10:
        raise gr.Error("Please provide or record a video shorter than 10 seconds...")
    split_tup = os.path.splitext(video)
    file_name = split_tup[0]
    file_extension = split_tup[1]
    if file_extension != ".mp4":
        print("Converting to mp4")
        video = file_name + ".mp4"
    # Check the frame rate and cap it if it is too high
    if clip.fps > fps:
        print(f"Video frame rate is over {fps}, resetting to {fps}")
        clip.write_videofile(video, fps=fps)
    elif file_extension != ".mp4":
        # Still write the converted .mp4 even when the frame rate is fine
        clip.write_videofile(video, fps=clip.fps)
    else:
        print("Video frame rate is OK")
    return video
def check_extension(video):
    # Extract the file name and extension
    file_name, file_extension = os.path.splitext(video)
    if file_extension != ".mp4":
        print("Converting to mp4")
        clip = moviepy.VideoFileClip(video)
        video = file_name + ".mp4"
        clip.write_videofile(video)
    return video
def pose3d(video, kpt_threshold):
    video = check_extension(video)
    print(device)
    # Reinitialize the 3D inferencer on every call
    human3d = MMPoseInferencer(pose3d="human3d")
    # Define new unique folder
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)
    print(check_fps(video))
    result_generator = human3d(video,
                               vis_out_dir=add_dir,
                               radius=5,
                               thickness=4,
                               rebase_keypoint_height=True,
                               kpt_thr=kpt_threshold,
                               device=device,
                               pred_out_dir=add_dir)
    results = list(result_generator)  # Exhaust the generator so every frame is processed and saved
    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)
    return "".join(out_file), "".join(kpoints)
def pose2d(video, kpt_threshold):
    # Convert/limit the input clip and use the (possibly new) .mp4 path
    video = get_frames(video)
    # Define new unique folder
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)
    print(check_fps(video))
    result_generator = human(video,
                             vis_out_dir=add_dir,
                             radius=5,
                             thickness=4,
                             rebase_keypoint_height=True,
                             kpt_thr=kpt_threshold,
                             device=device,
                             pred_out_dir=add_dir)
    results = list(result_generator)  # Exhaust the generator so every frame is processed and saved
    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)
    return "".join(out_file), "".join(kpoints)
def pose2dhand(video, kpt_threshold):
    video = check_extension(video)
    print(device)
    # Define a new unique folder next to the input video
    add_dir = str(uuid.uuid4())
    vis_out_dir = os.path.join("/".join(video.split("/")[:-1]), add_dir)
    os.makedirs(vis_out_dir)
    result_generator = hand(video,
                            vis_out_dir=vis_out_dir,
                            return_vis=True,
                            thickness=4,
                            radius=5,
                            rebase_keypoint_height=True,
                            kpt_thr=kpt_threshold,
                            device=device,
                            pred_out_dir=vis_out_dir)
    results = list(result_generator)  # Exhaust the generator so every frame is processed and saved
    out_file = glob.glob(os.path.join(vis_out_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(vis_out_dir, "*.json"))
    return "".join(out_file), "".join(kpoints)
def UI():
    block = gr.Blocks()
    with block:
        with gr.Column():
            with gr.Tab("Upload video"):
                with gr.Column():
                    with gr.Row():
                        with gr.Column():
                            with gr.Row():
                                video_input = gr.Video(source="upload", type="filepath", height=512, width=512)
                            # Insert slider with kpt_thr
                            with gr.Column():
                                gr.Markdown("Drag the keypoint threshold to filter out lower probability keypoints:")
                                file_kpthr = gr.Slider(0, 1, value=0.3, label='Keypoint threshold')
                    with gr.Row():
                        submit_pose_file = gr.Button("Make 2d pose estimation")
                        submit_pose3d_file = gr.Button("Make 3d pose estimation")
                        submit_hand_file = gr.Button("Make 2d hand estimation")
                    with gr.Row():
                        video_output1 = gr.PlayableVideo(height=512, label="Estimate human 2d poses", show_label=True)
                        video_output2 = gr.PlayableVideo(height=512, label="Estimate human 3d poses", show_label=True)
                        video_output3 = gr.PlayableVideo(height=512, label="Estimate human hand poses", show_label=True)
                    gr.Markdown("Download the .json file that contains the keypoint positions for each frame in the video.")
                    jsonoutput = gr.File(file_types=[".json"])
gr.Markdown("""There are multiple ways to interact with these keypoints.
\n The example below shows how you can calulate the angle on the elbow for example.
\n Copy the code into your own preferred interpreter and experiment with the keypoint file.
\n If you choose to run the code, start by installing the packages json and numpy. The complete overview of the keypoint indices can be seen in the tab 'General information'. """)
                    gr.Code(
                        value="""# Import the packages needed
import json
import numpy as np

# First we load the data; file_path should point to the downloaded .json file
with open(file_path, 'r') as json_file:
    data = json.load(json_file)

# Then we define a function for calculating the angle between three points
def calculate_angle(a, b, c):
    a = np.array(a)  # First point
    b = np.array(b)  # Middle point
    c = np.array(c)  # End point
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(radians * 180.0 / np.pi)
    if angle > 180.0:
        angle = 360 - angle
    return angle

# We select the first identified person in the first frame (zero index) as an example.
# To calculate the angle of the right elbow we take the points before and after it,
# which according to the indices are 6 (right shoulder) and 10 (right wrist).
keypoints = data[0]['instances'][0]['keypoints']

# COCO keypoint indices
shoulder_index = 6
elbow_index = 8
wrist_index = 10

shoulder_point = keypoints[shoulder_index]
elbow_point = keypoints[elbow_index]
wrist_point = keypoints[wrist_index]

angle = calculate_angle(shoulder_point, elbow_point, wrist_point)
print("Angle is: ", angle)
""",
                        language="python",
                        interactive=False,
                        show_label=False,
                    )
with gr.Tab("General information"):
gr.Markdown("""
\n # Information about the models
\n ## Pose models:
\n All the pose estimation models come from the library [MMpose](https://github.com/open-mmlab/mmpose). It is a library for human pose estimation that provides pre-trained models for 2D and 3D pose estimation.
\n The 2D pose model is used for estimating the 2D coordinates of human body joints from an image or a video frame. The model uses a convolutional neural network (CNN) to predict the joint locations and their confidence scores.
\n The 2D hand model is a specialized version of the 2D pose model that is designed for hand pose estimation. It uses a similar CNN architecture to the 2D pose model but is trained specifically for detecting the joints in the hand.
\n The 3D pose model is used for estimating the 3D coordinates of human body joints from an image or a video frame. The model uses a combination of 2D pose estimation and depth estimation to infer the 3D joint locations.
\n The keypoints in the 2D pose model has the following order:
\n ```
0: Nose
1: Left Eye
2: Right Eye
3: Left Ear
4: Right Ear
5: Left Shoulder
6: Right Shoulder
7: Left Elbow
8: Right Elbow
9: Left Wrist
10: Right Wrist
11: Left Hip
12: Right Hip
13: Left Knee
14: Right Knee
15: Left Ankle
16: Right Ankle
```
""")
        # Wire the buttons to the inference functions
        submit_pose_file.click(fn=pose2d,
                               inputs=[video_input, file_kpthr],
                               outputs=[video_output1, jsonoutput],
                               queue=True)
        submit_pose3d_file.click(fn=pose3d,
                                 inputs=[video_input, file_kpthr],
                                 outputs=[video_output2, jsonoutput],
                                 queue=True)
        submit_hand_file.click(fn=pose2dhand,
                               inputs=[video_input, file_kpthr],
                               outputs=[video_output3, jsonoutput],
                               queue=True)
    return block
if __name__ == "__main__":
    block = UI()
    block.queue(
        #concurrency_count=40,  # Increasing concurrency_count in queue() automatically increases max_threads in launch() as well.
        #max_size=25,  # Maximum number of requests the queue holds
        api_open=False  # Restrict all traffic to the user interface, rather than the programmatic API that Gradio creates automatically.
    ).launch(
        max_threads=41,
        server_name="0.0.0.0",
        server_port=7860,
        auth=("novouser", "bstad2023")
    )
# Gradio's default total concurrency = number of processors * 10.
# 4 vCPUs, 15 GB RAM, 40 GB VRAM -> 4 * 10 = 40 concurrent threads?