diff --git a/README.md b/README.md
index 53dc069..684ccdf 100644
--- a/README.md
+++ b/README.md
@@ -144,7 +144,7 @@ sports2d --help
```
- Run with custom parameters (all non specified are set to default):
``` cmd
- sports2d --show_graphs False --frame_range 0 2.1 --result_dir path_to_result_dir
+ sports2d --show_graphs False --time_range 0 2.1 --result_dir path_to_result_dir
```
``` cmd
sports2d --multiperson false --mode lightweight --det_frequency 50
@@ -218,7 +218,7 @@ Will be much faster, with no impact on accuracy. However, the installation takes
```
- Choose a fraction of the video to analyze (in seconds). For example:
```cmd
- sports2d --frame_range 0 2.1
+ sports2d --time_range 0 2.1
```
@@ -234,7 +234,7 @@ Sports2D:
**Okay but how does it work, really?**\
Sports2D:
-1. Reads stream from a webcam, from one video, or from a list of videos. Selects the specified frame range to process.
+1. Reads stream from a webcam, from one video, or from a list of videos. Selects the specified time or frame range to process.
2. Sets up the RTMLib pose tracker from RTMlib with specified parameters. It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked instead of detected for a certain number of frames. Any RTMPose model can be used.
3. Tracks people so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. This carefully crafted `sports2d` tracker runs at a comparable speed as the RTMlib one but is much more robust. The user can still choose the RTMLib method if they need it by specifying it in the Config.toml file.
4. Retrieves the keypoints with high enough confidence, and only keeps the persons with high enough average confidence.
diff --git a/Sports2D/Demo/Config_demo.toml b/Sports2D/Demo/Config_demo.toml
index 7b6bc68..836943b 100644
--- a/Sports2D/Demo/Config_demo.toml
+++ b/Sports2D/Demo/Config_demo.toml
@@ -13,13 +13,13 @@
[project]
video_input = 'demo.mp4' # 'webcam' or '', or ['video1_path.mp4', 'video2_path.avi>', ...]. Beware that images won't be saved if paths contain non ASCII characters.
+video_dir = '' # BETWEEN SINGLE QUOTES! # If empty, project dir is current dir
# Video parameters
-frame_range = [] # [] for the whole video, or [start_frame, end_frame], or [[start_frame1, end_frame1], [start_frame2, end_frame2], ...]
-## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
-## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
-## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
-video_dir = '' # BETWEEN SINGLE QUOTES! # If empty, result dir is current dir
+time_range = [] # Specify as [] for the whole video, or [start_time, end_time] in seconds, or [[start_time1, end_time1], [start_time2, end_time2], ...] for multiple intervals.
+frame_range = [] # Specify as [] for the whole video, or [start_frame, end_frame], or [[start_frame1, end_frame1], [start_frame2, end_frame2], ...] for multiple intervals.
+
+# Note: `time_range` and `frame_range` cannot be specified simultaneously.
# Webcam parameters
webcam_id = 0 # your webcam id (0 is default)
diff --git a/Sports2D/Sports2D.py b/Sports2D/Sports2D.py
index 0edd085..91e4ca1 100644
--- a/Sports2D/Sports2D.py
+++ b/Sports2D/Sports2D.py
@@ -28,7 +28,7 @@
- Run on webcam with default parameters:
sports2d --video_input webcam
- Run with custom parameters (all non specified are set to default):
- sports2d --show_plots False --frame_range 0 2.1 --result_dir path_to_result_dir
+ sports2d --show_plots False --time_range 0 2.1 --result_dir path_to_result_dir
sports2d --multiperson false --mode lightweight --det_frequency 50
- Run with a toml configuration file:
sports2d --config path_to_config.toml
@@ -65,7 +65,7 @@
- loads skeleton information
- reads stream from a video or a webcam
- sets up the RTMLib pose tracker from RTMlib with specified parameters
- - detects poses within the selected frame range
+ - detects poses within the selected time or frame range
- tracks people so that their IDs are consistent across frames
- retrieves the keypoints with high enough confidence, and only keeps the persons with enough high-confidence keypoints
 - computes joint and segment angles, and flips those on the left/right side if the respective foot is pointing to the left
@@ -123,6 +123,7 @@
## CONSTANTS
DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
+ 'time_range': [],
'frame_range': [],
'video_dir': '',
'webcam_id': 0,
@@ -188,7 +189,8 @@
CONFIG_HELP = {'config': ["c", "Path to a toml configuration file"],
'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
'webcam_id': ["w", "webcam ID. 0 if not specified"],
- 'frame_range': ["t", "start_frame, end_frame. Whole video if not specified"],
+ 'time_range': ["t", "start_time, end_time. In seconds. Whole video if not specified"],
+ 'frame_range': ["F", "start_frame, end_frame. Whole video if not specified"],
'video_dir': ["d", "Current directory if not specified"],
'result_dir': ["r", "Current directory if not specified"],
'show_realtime_results': ["R", "show results in real-time. true if not specified"],
@@ -257,11 +259,12 @@ def base_params(config_dict):
result_dir = Path(config_dict.get('process').get('result_dir')).resolve()
if result_dir == '': result_dir = Path.cwd()
- # video_files, frame_rates, frame_ranges
+ # video_files, frame_rates, time_ranges, frame_ranges
video_input = config_dict.get('project').get('video_input')
if video_input == "webcam" or video_input == ["webcam"]:
video_files = ['webcam'] # No video files for webcam
frame_rates = [None] # No frame rate for webcam
+ time_ranges = [None]
frame_ranges = [None]
else:
# video_files
@@ -283,6 +286,17 @@ def base_params(config_dict):
frame_rates.append(frame_rate)
video.release()
+ # time_ranges
+ time_ranges = np.array(config_dict.get('project').get('time_range'))
+ if time_ranges.shape == (0,):
+ time_ranges = [None] * len(video_files)
+ elif time_ranges.shape == (2,):
+ time_ranges = [time_ranges.tolist()] * len(video_files)
+ elif time_ranges.shape == (len(video_files), 2):
+ time_ranges = time_ranges.tolist()
+ else:
+ raise ValueError('Time range must be [] for analysing all frames of all videos, or [start_time, end_time] for analysing all videos from start_time to end_time, or [[start_time1, end_time1], [start_time2, end_time2], ...] for analysing each video for a different time_range.')
+
# frame_ranges
frame_ranges = np.array(config_dict.get('project').get('frame_range'))
if frame_ranges.shape == (0,):
@@ -294,7 +308,7 @@ def base_params(config_dict):
else:
raise ValueError('Frame range must be [] for analysing all frames of all videos, or [start_frame, end_frame] for analysing all videos from start_frame to end_frame, or [[start_frame1, end_frame1], [start_frame2, end_frame2], ...] for analysing each video for a different frame_range.')
- return video_dir, video_files, frame_rates, frame_ranges, result_dir
+ return video_dir, video_files, time_ranges, frame_ranges, frame_rates, result_dir
def get_leaf_keys(config, prefix=''):
@@ -365,19 +379,31 @@ def process(config='Config_demo.toml'):
config_dict = config
else:
config_dict = read_config_file(config)
- video_dir, video_files, frame_rates, frame_ranges, result_dir = base_params(config_dict)
+ video_dir, video_files, time_ranges, frame_ranges, frame_rates, result_dir = base_params(config_dict)
result_dir.mkdir(parents=True, exist_ok=True)
with open(result_dir / 'logs.txt', 'a+') as log_f: pass
logging.basicConfig(format='%(message)s', level=logging.INFO, force=True,
handlers = [logging.handlers.TimedRotatingFileHandler(result_dir / 'logs.txt', when='D', interval=7), logging.StreamHandler()])
- for video_file, frame_range, frame_rate in zip(video_files, frame_ranges, frame_rates):
+ for video_file, time_range, frame_range, frame_rate in zip(video_files, time_ranges, frame_ranges, frame_rates):
currentDateAndTime = datetime.now()
- frame_range_str = f' from frame {frame_range[0]} to frame {frame_range[1]}' if frame_range else ''
+ range_str = ''
+
+ if time_range and frame_range:
+ logging.error("Error: Both time_range and frame_range are specified for video {}. Only one should be provided.".format(video_file))
+ continue
+ elif time_range:
+ frame_range = [int(time_range[0] * frame_rate), int(time_range[1] * frame_rate)]
+ range_str = f' from {time_range[0]} to {time_range[1]} seconds'
+ elif frame_range:
+ frame_range = [int(frame_range[0]), int(frame_range[1])]
+ range_str = f' from frame {frame_range[0]} to frame {frame_range[1]}'
+ else:
+ frame_range = None
logging.info("\n\n---------------------------------------------------------------------")
- logging.info(f"Processing {video_file}{frame_range_str}")
+ logging.info(f"Processing {video_file}{range_str}")
logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
logging.info("---------------------------------------------------------------------")
@@ -408,7 +434,7 @@ def main():
- Run on webcam with default parameters:
sports2d --video_input webcam
- Run with custom parameters (all non specified are set to default):
- sports2d --show_plots False --frame_range 0 2.1 --result_dir path_to_result_dir
+ sports2d --show_plots False --time_range 0 2.1 --result_dir path_to_result_dir
sports2d --multiperson false --mode lightweight --det_frequency 50
- Run with a toml configuration file:
sports2d --config path_to_config.toml
@@ -427,7 +453,7 @@ def main():
parser.add_argument(*arg_str, type=str2bool, help=CONFIG_HELP[leaf_name][1])
elif type(leaf_keys[leaf_name]) == list:
if len(leaf_keys[leaf_name])==0:
- list_type = float # frame_range for example
+ list_type = float # time_range for example
else:
list_type = type(leaf_keys[leaf_name][0])
parser.add_argument(*arg_str, type=list_type, nargs='*', help=CONFIG_HELP[leaf_name][1])
diff --git a/Sports2D/process.py b/Sports2D/process.py
index cdd74bc..d3e08ba 100644
--- a/Sports2D/process.py
+++ b/Sports2D/process.py
@@ -16,7 +16,7 @@
- loads skeleton information
- reads stream from a video or a webcam
- sets up the RTMLib pose tracker from RTMlib with specified parameters
- - detects poses within the selected frame range
+ - detects poses within the selected time or frame range
- tracks people so that their IDs are consistent across frames
- retrieves the keypoints with high enough confidence, and only keeps the persons with enough high-confidence keypoints
 - computes joint and segment angles, and flips those on the left/right side if the respective foot is pointing to the left
@@ -1115,7 +1115,7 @@ def process_fun(config_dict, video_file, frame_range, frame_rate, result_dir):
logging.warning('Webcam input: the framerate may vary. If results are filtered, Sports2D will use the average framerate as input.')
else:
cap, out_vid, cam_width, cam_height, fps = setup_video(video_file_path, save_vid, vid_output_path)
- frame_range = [int(frame_range[0]), int(frame_range[1])] if frame_range else [0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))]
+ frame_range = frame_range if frame_range else [0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))]
frame_iterator = tqdm(range(*frame_range)) # use a progress bar
if show_realtime_results:
cv2.namedWindow(f'{video_file} Sports2D', cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)