diff --git a/api_example/demo.py b/api_example/demo.py
index 057f809..10756ea 100644
--- a/api_example/demo.py
+++ b/api_example/demo.py
@@ -17,9 +17,6 @@
 import subprocess
 import torch
 
-import sys
-sys.path.append('.')
-
 from lighthouse.models import CGDETRPredictor
 from typing import Dict, List, Optional
 
@@ -37,13 +34,9 @@ def load_weights(weight_dir: str) -> None:
 # use GPU if available
 device: str = 'cuda' if torch.cuda.is_available() else 'cpu'
 weight_dir: str = 'gradio_demo/weights'
-#weight_path: str = os.path.join(weight_dir, 'clip_cg_detr_qvhighlight.ckpt')
-#model: CGDETRPredictor = CGDETRPredictor(weight_path, device=device, feature_name='clip',
-#                                         slowfast_path=None, pann_path=None)
-weight_path: str = os.path.join(weight_dir, 'clip_slowfast_pann_cg_detr_qvhighlight.ckpt')
-model: CGDETRPredictor = CGDETRPredictor(weight_path, device=device, feature_name='clip_slowfast_pann',
-                                         slowfast_path='SLOWFAST_8x8_R50.pkl', pann_path='Cnn14_mAP=0.431.pth')
-
+weight_path: str = os.path.join(weight_dir, 'clip_cg_detr_qvhighlight.ckpt')
+model: CGDETRPredictor = CGDETRPredictor(weight_path, device=device, feature_name='clip',
+                                         slowfast_path=None, pann_path=None)
 
 # encode video features
 model.encode_video('api_example/RoripwjYFp8_60.0_210.0.mp4')
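
For context, this is roughly how the affected part of `api_example/demo.py` reads after the change: the `sys.path` hack is gone, and the demo loads the CLIP-only CG-DETR checkpoint instead of the CLIP+SlowFast+PANN one, so `SLOWFAST_8x8_R50.pkl` and `Cnn14_mAP=0.431.pth` are no longer needed. The sketch below is illustrative, not part of the diff; the `query` string, the `predict()` call, and printing its result are assumptions about how the demo continues, based on typical usage of `CGDETRPredictor`.

```python
import os
import torch

from lighthouse.models import CGDETRPredictor

# use GPU if available
device: str = 'cuda' if torch.cuda.is_available() else 'cpu'

# CLIP-only checkpoint: no SlowFast / PANN weight files required
weight_dir: str = 'gradio_demo/weights'
weight_path: str = os.path.join(weight_dir, 'clip_cg_detr_qvhighlight.ckpt')
model: CGDETRPredictor = CGDETRPredictor(weight_path, device=device, feature_name='clip',
                                         slowfast_path=None, pann_path=None)

# encode video features once, then run text queries against them
model.encode_video('api_example/RoripwjYFp8_60.0_210.0.mp4')

# hypothetical query; predict() is assumed to return the moment retrieval /
# highlight detection output for the encoded video
query: str = 'A man is speaking in front of the camera'
prediction = model.predict(query)
print(prediction)
```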