Skip to content

Commit

Permalink
Update scripts for new release v1.3.2
Browse files Browse the repository at this point in the history
  • Loading branch information
joachimpoutaraud committed Jan 4, 2024
1 parent b0e30b4 commit 5f46882
Show file tree
Hide file tree
Showing 5 changed files with 4,673 additions and 81 deletions.
4,587 changes: 4,586 additions & 1 deletion musicalgestures/MusicalGesturesToolbox.ipynb

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion musicalgestures/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
from musicalgestures._videoreader import mg_videoreader
from musicalgestures._flow import Flow
from musicalgestures._audio import MgAudio
from musicalgestures._utils import convert, convert_to_mp4, get_framecount, ffmpeg_cmd
from musicalgestures._utils import MgFigure, MgImage, convert, convert_to_mp4, get_framecount, ffmpeg_cmd

Check failure on line 7 in musicalgestures/__init__.py

View workflow job for this annotation

GitHub Actions / build (3.10)

Ruff (F401)

musicalgestures/__init__.py:7:37: F401 `musicalgestures._utils.MgFigure` imported but unused

Check failure on line 7 in musicalgestures/__init__.py

View workflow job for this annotation

GitHub Actions / build (3.10)

Ruff (F401)

musicalgestures/__init__.py:7:47: F401 `musicalgestures._utils.MgImage` imported but unused
from musicalgestures._mglist import MgList

Check failure on line 8 in musicalgestures/__init__.py

View workflow job for this annotation

GitHub Actions / build (3.10)

Ruff (F401)

musicalgestures/__init__.py:8:37: F401 `musicalgestures._mglist.MgList` imported but unused


class MgVideo(MgAudio):
Expand Down
5 changes: 3 additions & 2 deletions musicalgestures/_motionvideo.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,8 +198,9 @@ def mg_motion(
i += 1

# Terminate the processes
video_out.stdin.close()
video_out.wait()
if save_video:
video_out.stdin.close()
video_out.wait()
process.terminate()

if save_motiongrams:
Expand Down
148 changes: 77 additions & 71 deletions musicalgestures/_pose.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import os
import numpy as np
import pandas as pd
from musicalgestures._utils import MgProgressbar, convert_to_avi, extract_wav, embed_audio_in_video, roundup, frame2ms, generate_outfilename, in_colab
from musicalgestures._utils import MgProgressbar, convert_to_avi, extract_wav, embed_audio_in_video, roundup, frame2ms, generate_outfilename, in_colab, ffmpeg_cmd
import musicalgestures
import itertools

Expand Down Expand Up @@ -116,18 +116,10 @@ def pose(
else:
filename = self.filename

vidcap = cv2.VideoCapture(filename)
ret, frame = vidcap.read()
inWidth = int(roundup(self.width/downsampling_factor, 2))
inHeight = int(roundup(self.height/downsampling_factor, 2))

fps = int(vidcap.get(cv2.CAP_PROP_FPS))
width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

inWidth = int(roundup(width/downsampling_factor, 2))
inHeight = int(roundup(height/downsampling_factor, 2))

pb = MgProgressbar(total=length, prefix='Rendering pose estimation video:')
pb = MgProgressbar(total=self.length, prefix='Rendering pose estimation video:')

if save_video:
if target_name_video == None:
Expand All @@ -137,84 +129,99 @@ def pose(
target_name_video = os.path.splitext(target_name_video) + fex
if not overwrite:
target_name_video = generate_outfilename(target_name_video)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(target_name_video, fourcc, fps, (width, height))

# Pipe video with FFmpeg for reading frame by frame
cmd = ['ffmpeg', '-y', '-i', filename] # define ffmpeg command
process = ffmpeg_cmd(cmd, total_time=self.length, pipe='read')
video_out = None

ii = 0
data = []

while(vidcap.isOpened()):
ret, frame = vidcap.read()
if ret:

inpBlob = cv2.dnn.blobFromImage(
frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
while True:
# Read frame-by-frame
out = process.stdout.read(self.width*self.height*3)

net.setInput(inpBlob)

output = net.forward()
if out == b'':
pb.progress(self.length)
break

H = output.shape[2]
W = output.shape[3]
points = []
# Transform the bytes read into a numpy array
frame = np.frombuffer(out, dtype=np.uint8).reshape([self.height, self.width, 3]) # height, width, channels

for i in range(nPoints):
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
output = net.forward()

# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
H = output.shape[2]
W = output.shape[3]
points = []

# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
for i in range(nPoints):

# Scale the point to fit on the original image
x = (width * point[0]) / W
y = (height * point[1]) / H
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]

if prob > threshold:
points.append((int(x), int(y)))
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

else:
points.append(None)

if save_data:
time = frame2ms(ii, fps)
points_list = [[list(point)[0]/width, list(point)[1]/height, ] if point != None else [
0, 0] for point in points]
points_list_flat = itertools.chain.from_iterable(points_list)
datapoint = [time]
datapoint += points_list_flat
data.append(datapoint)

for pair in POSE_PAIRS:
partA = pair[0]
partB = pair[1]

if points[partA] and points[partB]:
cv2.line(frame, points[partA], points[partB],
(0, 255, 255), 2, lineType=cv2.LINE_AA)
cv2.circle(
frame, points[partA], 4, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
cv2.circle(
frame, points[partB], 4, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)

if save_video:
out.write(frame.astype(np.uint8))
# Scale the point to fit on the original image
x = (self.width * point[0]) / W
y = (self.height * point[1]) / H

else:
pb.progress(length)
break
if prob > threshold:
points.append((int(x), int(y)))

else:
points.append(None)

if save_data:
time = frame2ms(ii, self.fps)
points_list = [[list(point)[0]/self.width, list(point)[1]/self.height, ] if point != None else [
0, 0] for point in points]
points_list_flat = itertools.chain.from_iterable(points_list)
datapoint = [time]
datapoint += points_list_flat
data.append(datapoint)

for pair in POSE_PAIRS:
partA = pair[0]
partB = pair[1]

if points[partA] and points[partB]:
cv2.line(frame, points[partA], points[partB],
(0, 255, 255), 2, lineType=cv2.LINE_AA)
cv2.circle(
frame, points[partA], 4, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
cv2.circle(
frame, points[partB], 4, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)

if save_video:
if video_out is None:
cmd =['ffmpeg', '-y', '-s', '{}x{}'.format(frame.shape[1], frame.shape[0]),
'-r', str(self.fps), '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-vcodec', 'rawvideo',
'-i', '-', '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', target_name_video]
video_out = ffmpeg_cmd(cmd, total_time=self.length, pipe='write')

video_out.stdin.write(frame.astype(np.uint8))

# Flush the buffer
process.stdout.flush()
pb.progress(ii)
ii += 1

# Terminate the processes
if save_video:
out.release()
destination_video = target_name_video
video_out.stdin.close()
video_out.wait()
# Check if the original video file has audio
if self.has_audio:
source_audio = extract_wav(of + fex)
embed_audio_in_video(source_audio, destination_video)
embed_audio_in_video(source_audio, target_name_video)
os.remove(source_audio)

process.terminate()

def save_txt(of, width, height, model, data, data_format, target_name_data, overwrite):
"""
Helper function to export pose estimation data as textfile(s).
Expand Down Expand Up @@ -328,13 +335,12 @@ def save_single_file(of, width, height, model, data, data_format, target_name_da
target_name_data=target_name_data, overwrite=overwrite)

if save_data:
save_txt(of, width, height, model, data, data_format,
save_txt(of, self.width, self.height, model, data, data_format,
target_name_data=target_name_data, overwrite=overwrite)

if save_video:
# save result as pose_video for parent MgVideo
self.pose_video = musicalgestures.MgVideo(
destination_video, color=self.color, returned_by_process=True)
self.pose_video = musicalgestures.MgVideo(target_name_video, color=self.color, returned_by_process=True)
return self.pose_video
else:
# otherwise just return the parent MgVideo
Expand Down
11 changes: 5 additions & 6 deletions musicalgestures/_show.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
# from IPython.core.display import Video
from base64 import b64encode
import musicalgestures
from musicalgestures._utils import in_colab


def mg_show(self, filename=None, key=None, mode='windowed', window_width=640, window_height=480, window_title=None):
Expand Down Expand Up @@ -39,7 +38,7 @@ def show(file, width=640, height=480, mode='windowed', title='Untitled', parent=
"""

# Check's if the environment is a Google Colab document
if in_colab():
if musicalgestures._utils.in_colab():
mode = 'notebook'

if mode.lower() == 'windowed':
Expand Down Expand Up @@ -95,26 +94,26 @@ def colab_display(video_to_display, video_width, video_height):
if file_dir == cwd:
try:
video_to_display = os.path.relpath(video_to_display, os.getcwd()).replace('\\', '/')
if in_colab():
if musicalgestures._utils.in_colab():
display(colab_display(video_to_display, video_width, video_height))
else:
display(Video(video_to_display,width=video_width, height=video_height))
except ValueError:
video_to_display = os.path.abspath(video_to_display, os.getcwd()).replace('\\', '/')
if in_colab():
if musicalgestures._utils.in_colab():
display(colab_display(video_to_display, video_width, video_height))
else:
display(Video(video_to_display, width=video_width, height=video_height))
else:
try:
video_to_display = os.path.relpath(video_to_display, os.getcwd()).replace('\\', '/')
if in_colab():
if musicalgestures._utils.in_colab():
display(colab_display(video_to_display, video_width, video_height))
else:
display(Video(video_to_display, width=video_width, height=video_height))
except ValueError:
video_to_display = os.path.abspath(video_to_display, os.getcwd()).replace('\\', '/')
if in_colab():
if musicalgestures._utils.in_colab():
display(colab_display(video_to_display, video_width, video_height))
else:
display(Video(video_to_display, width=video_width,height=video_height))
Expand Down

0 comments on commit 5f46882

Please sign in to comment.