joss draft
davidpagnon committed Feb 16, 2024
1 parent 4617217 commit 579b093
Showing 4 changed files with 382 additions and 2 deletions.
Binary file added Content/joint_convention.png
289 changes: 289 additions & 0 deletions Content/paper.bib
@article{Beaucage_2019,
title={Validation of an OpenSim full-body model with detailed lumbar spine for estimating lower lumbar spine loads during symmetric and asymmetric lifting tasks},
author={Beaucage-Gauvreau, Erica and Robertson, William SP and Brandon, Scott CE and Fraser, Robert and Freeman, Brian JC and Graham, Ryan B and Thewlis, Dominic and Jones, Claire F},
journal={Computer methods in biomechanics and biomedical engineering},
DOI = {10.1080/10255842.2018.1564819},
volume={22},
number={5},
pages={451--464},
year={2019},
publisher={Taylor \& Francis}
}

@article{Bradski_2000,
author = {Bradski, G.},
title = {The OpenCV Library},
journal = {Dr. Dobb's Journal of Software Tools},
year = {2000}
}

@article{Butterworth_1930,
title={On the theory of filter amplifiers},
author={Butterworth, Stephen},
journal={Wireless Engineer},
volume={7},
number={6},
pages={536--541},
year={1930}
}

@article{Cao_2019,
title={OpenPose: realtime multi-person 2D pose estimation using Part Affinity Fields},
author={Cao, Zhe and Hidalgo, Gines and Simon, Tomas and Wei, Shih-En and Sheikh, Yaser},
journal={IEEE transactions on pattern analysis and machine intelligence},
volume={43},
number={1},
pages={172--186},
year={2019},
URL = {https://arxiv.org/abs/1611.08050},
DOI = {10.1109/TPAMI.2019.2929257},
publisher={IEEE}
}

@article{Cleveland_1981,
title={LOWESS: A program for smoothing scatterplots by robust locally weighted regression},
author={Cleveland, William S},
DOI={10.2307/2683591},
journal={American Statistician},
volume={35},
number={1},
pages={54},
year={1981}
}

@article{Colyer_2018,
title={A review of the evolution of vision-based motion analysis and the integration of advanced computer vision methods towards developing a markerless system},
author={Colyer, Steffi L and Evans, Murray and Cosker, Darren P and Salo, Aki IT},
journal={Sports medicine-open},
DOI={10.1186/s40798-018-0139-y},
volume={4},
number={1},
pages={1--15},
year={2018},
publisher={SpringerOpen}
}

@article{Delp_2007,
title={OpenSim: open-source software to create and analyze dynamic simulations of movement},
author={Delp, Scott L and Anderson, Frank C and Arnold, Allison S and Loan, Peter and Habib, Ayman and John, Chand T and Guendelman, Eran and Thelen, Darryl G},
journal={IEEE transactions on biomedical engineering},
volume={54},
number={11},
pages={1940--1950},
year={2007},
URL = {https://ieeexplore.ieee.org/abstract/document/4352056},
DOI = {10.1109/TBME.2007.901024},
publisher={IEEE}
}

@inproceedings{Fang_2017,
title={{RMPE}: Regional Multi-person Pose Estimation},
author={Fang, Hao-Shu and Xie, Shuqin and Tai, Yu-Wing and Lu, Cewu},
booktitle={ICCV},
year={2017},
URL = {https://ieeexplore.ieee.org/document/8237518},
DOI = {10.1109/ICCV.2017.256}
}

@article{Hartley_1997,
title={Triangulation},
author={Hartley, Richard I and Sturm, Peter},
journal={Computer vision and image understanding},
DOI={10.1006/cviu.1997.0547},
volume={68},
number={2},
pages={146--157},
year={1997},
publisher={Elsevier}
}

@misc{Hidalgo_2019,
author = {Hidalgo, Ginés},
title = {OpenPose Experimental Models},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
url = {https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models#body_25b-model---option-2-recommended}
}

@misc{Hidalgo_2021,
author = {Hidalgo, Ginés},
title = {OpenPose 3D reconstruction module},
year = {2021},
publisher = {GitHub},
journal = {GitHub repository},
url = {https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/advanced/3d_reconstruction_module.md}
}

@article{Kanko_2021,
title={Concurrent assessment of gait kinematics using marker-based and markerless motion capture},
author={Kanko, Robert M and Laende, Elise K and Davis, Elysia M and Selbie, W Scott and Deluzio, Kevin J},
journal={Journal of biomechanics},
volume={127},
pages={110665},
year={2021},
URL = {https://doi.org/10.1016/j.jbiomech.2021.110665},
DOI = {10.1016/j.jbiomech.2021.110665},
publisher={Elsevier}
}

@article{Karashchuk_2021,
title={Anipose: a toolkit for robust markerless 3D pose estimation},
author={Karashchuk, Pierre and Rupp, Katie L and Dickinson, Evyn S and Walling-Bell, Sarah and Sanders, Elischa and Azim, Eiman and Brunton, Bingni W and Tuthill, John C},
journal={Cell reports},
volume={36},
number={13},
pages={109730},
year={2021},
URL = {https://doi.org/10.1016/j.celrep.2021.109730},
DOI = {10.1016/j.celrep.2021.109730},
publisher={Elsevier}
}

@article{Mathis_2018,
title={DeepLabCut: markerless pose estimation of user-defined body parts with deep learning},
author={Mathis, Alexander and Mamidanna, Pranav and Cury, Kevin M and Abe, Taiga and Murthy, Venkatesh N and Mathis, Mackenzie Weygandt and Bethge, Matthias},
journal={Nature neuroscience},
volume={21},
number={9},
pages={1281--1289},
year={2018},
URL = {https://www.nature.com/articles/s41593-018-0209-y},
DOI = {10.1038/s41593-018-0209-y},
publisher={Nature Publishing Group}
}

@misc{Matthis_2022,
type={Python},
title={FreeMoCap: A free, open source markerless motion capture system},
rights={AGPL-3.0},
url={https://github.com/freemocap/freemocap},
author={Matthis, Jonathan Samir and Cherian, Aaron},
year={2022},
publisher = {GitHub},
journal = {GitHub repository},
}

@article{Needham_2021,
title={The accuracy of several pose estimation methods for 3D joint centre localisation},
author={Needham, Laurie and Evans, Murray and Cosker, Darren P and Wade, Logan and McGuigan, Polly M and Bilzon, James L and Colyer, Steffi L},
journal={Scientific reports},
DOI={10.1038/s41598-021-00212-x},
volume={11},
number={1},
pages={1--11},
year={2021},
publisher={Nature Publishing Group}
}

@article{Pagnon_2021,
title={Pose2Sim: An End-to-End Workflow for 3D Markerless Sports Kinematics—Part 1: Robustness},
author={Pagnon, David and Domalain, Mathieu and Reveret, Lionel},
journal={Sensors},
volume={21},
number={19},
year={2021},
URL = {https://www.mdpi.com/1424-8220/21/19/6530},
DOI = {10.3390/s21196530},
publisher={Multidisciplinary Digital Publishing Institute}
}

@article{Pagnon_2022,
title = {Pose2Sim: An End-to-End Workflow for 3D Markerless Sports Kinematics—Part 2: Accuracy},
author = {Pagnon, David and Domalain, Mathieu and Reveret, Lionel},
journal = {Sensors},
volume={22},
number={7},
year={2022},
URL = {https://www.mdpi.com/1424-8220/22/7/2712},
DOI = {10.3390/s22072712},
publisher={Multidisciplinary Digital Publishing Institute}
}

@article{Rajagopal_2016,
title={Full-body musculoskeletal model for muscle-driven simulation of human gait},
author={Rajagopal, Apoorva and Dembia, Christopher L and DeMers, Matthew S and Delp, Denny D and Hicks, Jennifer L and Delp, Scott L},
journal={IEEE transactions on biomedical engineering},
DOI={10.1109/tbme.2016.2586891},
volume={63},
number={10},
pages={2068--2079},
year={2016},
publisher={IEEE}
}

@article{Seth_2018,
DOI = {10.1371/journal.pcbi.1006223},
author = {Seth, Ajay AND Hicks, Jennifer L. AND Uchida, Thomas K. AND Habib, Ayman AND Dembia, Christopher L. AND Dunne, James J. AND Ong, Carmichael F. AND DeMers, Matthew S. AND Rajagopal, Apoorva AND Millard, Matthew AND Hamner, Samuel R. AND Arnold, Edith M. AND Yong, Jennifer R. AND Lakshmikanth, Shrinidhi K. AND Sherman, Michael A. AND Ku, Joy P. AND Delp, Scott L.},
journal = {PLOS Computational Biology},
publisher = {Public Library of Science},
title = {OpenSim: Simulating musculoskeletal dynamics and neuromuscular control to study human and animal movement},
year = {2018},
month = {07},
volume = {14},
url = {https://doi.org/10.1371/journal.pcbi.1006223},
pages = {1-20},
number = {7},
}

@article{Sheshadri_2020,
DOI={10.21105/joss.01849},
url = {https://doi.org/10.21105/joss.01849},
year = {2020},
publisher = {The Open Journal},
volume = {5},
number = {45},
pages = {1849},
author = {Sheshadri, Swathi and Dann, Benjamin and Hueser, Timo and Scherberger, Hansjoerg},
title = {3D reconstruction toolbox for behavior tracked with multiple cameras},
journal = {Journal of Open Source Software}
}

@article{Uhlrich_2022,
title={OpenCap: 3D human movement dynamics from smartphone videos},
url={https://www.biorxiv.org/content/10.1101/2022.07.07.499061v1},
DOI={10.1101/2022.07.07.499061},
publisher={bioRxiv},
author={Uhlrich, Scott D. and Falisse, Antoine and Kidziński, Łukasz and Muccini, Julie and Ko, Michael and Chaudhari, Akshay S. and Hicks, Jennifer L. and Delp, Scott L.},
year={2022},
month={Jul},
pages={2022.07.07.499061}
}

@article{Zeni_2008,
title={Two simple methods for determining gait events during treadmill and overground walking using kinematic data},
author={Zeni Jr, JA and Richards, JG and Higginson, JS},
journal={Gait \& posture},
volume={27},
number={4},
pages={710--714},
year={2008},
URL={https://doi.org/10.1016/j.gaitpost.2007.07.007},
DOI={10.1016/j.gaitpost.2007.07.007},
publisher={Elsevier}
}

@article{Zhang_2000,
title={A flexible new technique for camera calibration},
author={Zhang, Zhengyou},
journal={IEEE Transactions on pattern analysis and machine intelligence},
DOI={10.1109/34.888718},
volume={22},
number={11},
pages={1330--1334},
year={2000},
publisher={IEEE}
}

@article{Zheng_2022,
title={Deep learning-based human pose estimation: A survey},
author={Zheng, Ce and Wu, Wenhan and Yang, Taojiannan and Zhu, Sijie and Chen, Chen and Liu, Ruixu and Shen, Ju and Kehtarnavaz, Nasser and Shah, Mubarak},
journal={arXiv},
year={2022},
URL={https://doi.org/10.48550/arXiv.2012.13392},
DOI={10.48550/arXiv.2012.13392},
}
90 changes: 90 additions & 0 deletions Content/paper.md
---
title: 'Sports2D: Compute 2D joint and segment angles from a video.'
tags:
- python
- markerless kinematics
- motion capture
- sports performance analysis
- openpose
- clinical gait analysis
authors:
- name: David Pagnon^[corresponding author]
orcid: 0000-0002-6891-8331
affiliation: 1
affiliations:
- name: Centre for the Analysis of Motion, Entertainment Research & Applications (CAMERA), University of Bath, Claverton Down, Bath, BA2 7AY, UK
index: 1
date: February 14 2024
bibliography: paper.bib
---


# Summary
`Sports2D` provides a user-friendly solution for the automatic analysis of human movement from videos. This Python package uses markerless 2D pose estimation to detect joint coordinates in each video frame, and then computes 2D joint and segment angles. It can be installed either locally or on a free server, which makes it possible to run it directly from a smartphone.

It outputs annotated videos and image sequences that display joint locations, joint angles, and segment angles for each detected person. This information is also stored in .csv files, which can be opened in MS Excel or any other spreadsheet editor for further analysis.

`Sports2D` may be useful for clinicians as a clinical decision support system (CDSS) [], as well as for gait analysis [] or ergonomic design []. Sports coaches can also use it to quantify key performance indicators (KPIs) [], or to better understand, correct, or compare athletes' movement patterns. Finally, researchers can use it as a simple tool for on-the-fly 2D biomechanical analysis. One of many possible use cases would be evaluating ACL injury risk from deceleration drills [].

![Example results from a Demo video.\label{fig:demo video results}](demo_openpose_results.png)
![Example plot of joint angle evolution.\label{fig:joint angle evolution}](demo_show_plots.png)


# Statement of need

Machine learning has recently accelerated the development and availability of markerless kinematics, which allows kinematic data to be collected without physical markers or manual annotation.

A large proportion of these tools focuses on 2D analysis, such as `OpenPose` [@Cao_2019], `BlazePose` [], or `DeepLabCut` []. Although they have the advantage of being open-source, they are not easily accessible to people without a programming background, and their output is not directly usable for further kinematic investigation. Yet, clinical acceptance of new technologies is known to be influenced not only by their price and performance, but also by their perceived ease of use, the social influence around the user, and other parameters described by the Unified Theory of Acceptance and Use of Technology (UTAUT2) [].

In fact, there is a clear trade-off between accuracy and ease of use. Some open-source tools focus on the accuracy of a 3D analysis by using multiple cameras, such as `Pose2Sim` [] or `OpenCap` []. These, however, require either a certain level of programming skill or a particular hardware setup. Other tools put more emphasis on user-friendliness, and point out that 2D analysis is often sufficient when the analyzed motion mostly lies in the sagittal or frontal plane. `Kinovea` [], for example, is a widely used software package for sports performance analysis that provides multiple additional features. However, it relies on tracking manually placed labels, which may be time-consuming when analyzing numerous videos and can lack robustness when the tracked points of interest are lost. It is also only available on Windows, and requires the user to transfer files prior to analysis.

`Sports2D` is an alternative solution that aims to fill this gap: it is free and open-source, easy to install, can be run from any smartphone or computer, and automatically provides 2D joint and segment angles without the need for manual annotation. It is also robust, and can be used to analyze numerous videos at once. The motion of multiple people can be analyzed in the same video, and the output is directly usable for further statistical analysis.


# Workflow

`Sports2D` can be installed and run in two different ways: locally, or on a free Google Colab server [].
- *If run locally*, it can be installed via `pip install sports2d`. Two options are then offered: running it with BlazePose as the pose estimation model, or with OpenPose. BlazePose comes preinstalled and is very fast; however, it is less accurate and only detects one person per video. OpenPose is more accurate, allows for the detection of multiple people, and comes with more fine-tuning options in `Sports2D`, but it is slower and must be installed by the user (a minimal usage sketch follows this list).
- *If run on Colab*, it can be installed in one click from any computer or smartphone, either every time the user needs it, or once and for all on Google Drive. In this case, OpenPose is automatically installed and runs by default, and video and table results are automatically saved on Google Drive. A video tutorial can be found at this address: https://www.youtube.com/watch?v=Er5RpcJ8o1Y.
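
The sketch below illustrates the local workflow. The module layout, function names, and configuration file shown here are assumptions made for illustration and may not match the actual `Sports2D` entry points.

```python
# Hypothetical usage sketch -- function names and the configuration file are
# assumptions and may differ from the actual Sports2D API.
from Sports2D import Sports2D  # assumed module layout

# Assumed TOML configuration file listing the videos to analyze, the chosen pose
# model (BlazePose or OpenPose), filtering options, and the angles to compute.
config = 'Config_demo.toml'

Sports2D.detect_pose(config)      # detect 2D joint coordinates in each frame
Sports2D.compute_angles(config)   # compute joint and segment angles
```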

After installation, the user can choose one or several videos to analyze. Then, `Sports2D` goes through two stages:
- **Pose detection:** Joint centers are detected in each video frame. If OpenPose is used, multiple persons can be detected with consistent IDs across frames: a person detected in one frame is matched to a person in the next frame when the distance between them is small. Sequences of missing data are interpolated if they are shorter than N frames, N being a threshold defined by the user. The resulting coordinates can be filtered with a Butterworth, Gaussian, Median, or LOESS filter, and can also be plotted (a minimal filtering sketch follows this list). Note that locations are expressed in pixels, but can be converted to meters if the user provides the distance between two points in the video.
- **Joint and segment angle estimation:** The joint and segment angles of interest can be selected, and are computed from the previously calculated positions (see the computation sketch after the convention lists below).
If a person suddenly faces the other way, this change of direction is taken into account: the person is considered to be facing left when their toes are to the left of their heels.
The resulting angles can be filtered in the same way as the point coordinates, and they can also be plotted.
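
The sketch below illustrates the interpolation and Butterworth filtering step on a single coordinate series. It is not the `Sports2D` source code: the column name, gap threshold, and filter settings are assumptions chosen for the example.

```python
# Illustrative sketch of gap interpolation followed by low-pass filtering.
# Column names, default thresholds, and filter settings are assumptions.
import pandas as pd
from scipy.signal import butter, filtfilt

def clean_coordinates(coords: pd.Series, fps: float, max_gap: int = 10,
                      cutoff_hz: float = 6.0, order: int = 4) -> pd.Series:
    """Interpolate short gaps, then apply a zero-lag low-pass Butterworth filter."""
    # Linearly interpolate runs of missing values no longer than `max_gap` frames.
    filled = coords.interpolate(method='linear', limit=max_gap, limit_area='inside')
    if filled.isna().any():
        # Longer (or leading/trailing) gaps are left unfiltered in this sketch.
        return filled
    # Low-pass Butterworth, applied forward and backward to avoid phase lag.
    b, a = butter(order, cutoff_hz / (fps / 2), btype='low')
    return pd.Series(filtfilt(b, a, filled.to_numpy()), index=coords.index)

# Example with an assumed column name: right-knee x coordinate at 30 fps.
# df['RKnee_x_filtered'] = clean_coordinates(df['RKnee_x'], fps=30)
```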

Joint angle conventions are as follows:
- Ankle dorsiflexion: Between heel and big toe, and ankle and knee
- Knee flexion: Between hip, knee, and ankle
- Hip flexion: Between knee, hip, and shoulder
- Shoulder flexion: Between hip, shoulder, and elbow
- Elbow flexion: Between wrist, elbow, and shoulder

Segment angles are measured anticlockwise between the horizontal and the segment lines:
- Foot: Between heel and big toe
- Shank: Between knee and ankle
- Thigh: Between hip and knee
- Arm: Between shoulder and elbow
- Forearm: Between elbow and wrist
- Trunk: Between shoulder midpoint and hip midpoint
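
The sketch below shows how these conventions translate into code: a joint angle is the included angle at the middle keypoint, and a segment angle is measured anticlockwise from the horizontal (the image y axis is flipped because it points downward in pixel coordinates). The keypoint values, the proximal-to-distal segment direction, and any sign or offset conventions applied on top of the included angle are assumptions; this is not the `Sports2D` implementation.

```python
# Minimal sketch of the angle conventions listed above (not the Sports2D code).
import numpy as np

def joint_angle(pt_a, pt_b, pt_c):
    """Included angle at pt_b (degrees) between segments b->a and b->c,
    e.g. the knee angle with a=hip, b=knee, c=ankle."""
    u = np.asarray(pt_a, float) - np.asarray(pt_b, float)
    v = np.asarray(pt_c, float) - np.asarray(pt_b, float)
    cos_angle = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))

def segment_angle(pt_proximal, pt_distal):
    """Anticlockwise angle (degrees) between the horizontal and the segment,
    e.g. the shank with proximal=knee, distal=ankle."""
    dx, dy = np.asarray(pt_distal, float) - np.asarray(pt_proximal, float)
    return np.degrees(np.arctan2(-dy, dx))  # -dy: image y axis points downward

# Example with made-up pixel coordinates:
# hip, knee, ankle = (320, 240), (330, 340), (325, 440)
# joint_angle(hip, knee, ankle)  -> ~171 deg (nearly straight leg)
# segment_angle(knee, ankle)     -> ~-93 deg (shank nearly vertical, ankle below knee)
```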

![Joint angle conventions. Adapted from [@Yang2007].\label{fig:joint angle conventions}](Joint_convention.png)


# Limitations

The user of `Sports2D` should be aware of the following limitations:
- Results are acceptable only if the persons move within the image plane, from right to left or from left to right.
If research-grade markerless joint kinematics is needed, consider using several cameras and constraining the angles to a biomechanically consistent model; see `Pose2Sim`, for example.
- Angle estimation is only as good as the underlying pose estimation, i.e., it is not perfect, especially when motion blur is significant, as in some broadcast videos.
- Google Colab does not comply with the European GDPR requirements regarding data privacy []. Install `Sports2D` locally if this is a concern.


# Acknowledgements
I would like to acknowledge Rob Olivar, a sports coach who made me aware of the need for such a tool.\
I also acknowledge the work of the dedicated people involved in the many major software programs and packages used by `Sports2D`, such as `Python`, `OpenPose`, `BlazePose`, `OpenCV` [@Bradski_2000], among others.


# References

