Code: source — command reference
- Download fall dataset:
# Download the fall dataset splits.
# Fixes: the line continuation after the script name was missing, which split
# the command in two; the output dir now matches the ./data/fall_dataset path
# that the processing commands below read from.
python ./scripts/download_fall_dataset.py \
  -o ./data/fall_dataset \
  --train \
  --valid \
  --test
- Process fall dataset:
# Process the raw training split into the processed dataset layout.
python ./scripts/process_fall_dataset.py \
  -i './data/fall_dataset/train' \
  -o './data/fall_dataset-processed/train'
# Process the raw test split into the processed dataset layout.
python ./scripts/process_fall_dataset.py \
  -i './data/fall_dataset/test' \
  -o './data/fall_dataset-processed/test'
# Convert a YOLO-format dataset into the project's folder layout.
python scripts/convert_yolo_dataset.py \
  --input './data/custom_dataset_yolo' \
  --output './data/custom_dataset'
# Fetch the raw training-video archive from the project's GitHub release.
wget 'https://github.com/sumitkumarjethani/fall-detection/releases/download/v0.1/videos-dataset-train-raw.zip'
Movenet:
# Generate a landmarks dataset with the MoveNet (thunder) pose model.
python scripts/generate_landmarks_dataset.py \
  -i './data/videos-dataset-test-raw' \
  -o './data/movenet-videos-dataset-test-out' \
  -f './data/movenet-videos-dataset-test-csv' \
  --pose-model-name 'movenet' \
  --movenet-version 'movenet_thunder'
Mediapipe:
# Generate a landmarks dataset with the MediaPipe pose model.
python scripts/generate_landmarks_dataset.py \
  -i './data/videos-dataset-test-raw' \
  -o './data/mediapipe-videos-dataset-test-out' \
  -f './data/mediapipe-videos-dataset-test-csv' \
  --pose-model-name 'mediapipe'
Yolo:
# Generate a landmarks dataset with the YOLOv8 pose model.
# Fixes: the -i argument had an unterminated double quote, and two missing
# line continuations (after -i and after --pose-model-name) split this
# into three broken commands.
python scripts/generate_landmarks_dataset.py \
  -i "./data/videos-dataset-test-raw" \
  -o "./data/yolo-videos-dataset-test-out" \
  -f "./data/yolo-videos-dataset-test-csv" \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt"
Movenet:
# Generate an augmented landmarks dataset (flip / rotate / zoom) with MoveNet.
python scripts/generate_landmarks_dataset.py \
  -i './data/videos-dataset-test' \
  -o './data/movenet-videos-dataset-test-out' \
  -f './data/movenet-videos-dataset-test-csv' \
  --pose-model-name 'movenet' \
  --movenet-version 'movenet_thunder' \
  --horizontal-flip \
  --rotate 10 \
  --zoom 1.1
Mediapipe:
# Generate an augmented landmarks dataset (flip / rotate / zoom) with MediaPipe.
python scripts/generate_landmarks_dataset.py \
  -i './data/videos-dataset-test' \
  -o './data/mediapipe-videos-dataset-test-out' \
  -f './data/mediapipe-videos-dataset-test-csv' \
  --pose-model-name 'mediapipe' \
  --horizontal-flip \
  --rotate 10 \
  --zoom 1.1
Yolo:
# Generate an augmented landmarks dataset (flip / rotate / zoom) with YOLOv8.
# Fix: the line continuation after --pose-model-name was missing, which cut
# the command off before the remaining options.
python scripts/generate_landmarks_dataset.py \
  -i "./data/videos-dataset-test" \
  -o "./data/yolo-videos-dataset-test-out" \
  -f "./data/yolo-videos-dataset-test-csv" \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt" \
  --horizontal-flip \
  --rotate 10 \
  --zoom 1.1
All models:
# Build the intersection dataset common to all three pose models' CSV outputs.
python scripts/generate_intersection_landmarks_dataset.py \
  -i './data/movenet-videos-dataset-test-csv' \
     './data/mediapipe-videos-dataset-test-csv' \
     './data/yolo-videos-dataset-test-csv' \
  -o './data/intersection'
Movenet:
# Run single-image pose inference with MoveNet (thunder).
python scripts/image_pose_inference.py \
  -i './data/fall_sample.jpg' \
  -o './data/fall_sample_out.jpg' \
  --pose-model-name 'movenet' \
  --movenet-version 'movenet_thunder'
Mediapipe:
# Run single-image pose inference with MediaPipe.
python scripts/image_pose_inference.py \
  -i './data/fall_sample.jpg' \
  -o './data/fall_sample_out.jpg' \
  --pose-model-name 'mediapipe'
Yolo:
# Run single-image pose inference with YOLOv8.
# Fix: the line continuation after --pose-model-name was missing, so the
# --yolo-pose-model-path option was parsed as a separate (broken) command.
python scripts/image_pose_inference.py \
  -i "./data/fall_sample.jpg" \
  -o "./data/fall_sample_out.jpg" \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt"
Movenet:
# Live webcam pose inference with MoveNet (thunder).
python scripts/webcam_pose_inference.py \
  --pose-model-name 'movenet' \
  --movenet-version 'movenet_thunder'
Mediapipe:
# Live webcam pose inference with MediaPipe.
python scripts/webcam_pose_inference.py \
  --pose-model-name 'mediapipe'
Yolo:
# Live webcam pose inference with YOLOv8.
# Fix: the line continuation after --pose-model-name was missing, cutting
# the --yolo-pose-model-path option off the command.
python scripts/webcam_pose_inference.py \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt"
Movenet:
# Train a random-forest classifier on MoveNet landmarks (17 keypoints, 2D).
python scripts/train_pose_classifier.py \
  -i './data/movenet-videos-dataset-test-csv' \
  -o './models' \
  --model 'rf' \
  --model-name 'movenet_rf_pose_classifier' \
  --n-kps 17 \
  --n-dim 2
Mediapipe:
# Train a random-forest classifier on MediaPipe landmarks (33 keypoints, 3D).
python scripts/train_pose_classifier.py \
  -i './data/mediapipe-videos-dataset-test-csv' \
  -o './models' \
  --model 'rf' \
  --model-name 'mediapipe_rf_pose_classifier' \
  --n-kps 33 \
  --n-dim 3
Yolo:
# Train a random-forest classifier on YOLOv8 landmarks (17 keypoints, 2D).
python scripts/train_pose_classifier.py \
  -i './data/yolo-videos-dataset-test-csv' \
  -o './models' \
  --model 'rf' \
  --model-name 'yolo_rf_pose_classifier' \
  --n-kps 17 \
  --n-dim 2
Movenet:
# Train a k-nearest-neighbours classifier on MoveNet landmarks (k = 10).
python scripts/train_pose_classifier.py \
  -i './data/movenet-videos-dataset-test-csv' \
  -o './models' \
  --model 'knn' \
  --model-name 'movenet_knn_pose_classifier' \
  --n-kps 17 \
  --n-dim 2 \
  --n-neighbours 10
Mediapipe:
# Train a k-nearest-neighbours classifier on MediaPipe landmarks (k = 10).
python scripts/train_pose_classifier.py \
  -i './data/mediapipe-videos-dataset-test-csv' \
  -o './models' \
  --model 'knn' \
  --model-name 'mediapipe_knn_pose_classifier' \
  --n-kps 33 \
  --n-dim 3 \
  --n-neighbours 10
Yolo:
# Train a k-nearest-neighbours classifier on YOLOv8 landmarks (k = 10).
python scripts/train_pose_classifier.py \
  -i './data/yolo-videos-dataset-test-csv' \
  -o './models' \
  --model 'knn' \
  --model-name 'yolo_knn_pose_classifier' \
  --n-kps 17 \
  --n-dim 2 \
  --n-neighbours 10
Movenet:
# Evaluate the MoveNet RF classifier on the intersection test split.
# Fix: the classifier file name now matches the artifact produced by training
# (--model-name "movenet_rf_pose_classifier" uses underscores, not hyphens).
python scripts/evaluate_pose_classifier.py \
  -i "./data/intersection/movenet-videos-dataset-test-csv" \
  -o "./metrics/" \
  -f "movenet_rf_test_dataset-inter" \
  --pose-classifier "./models/movenet_rf_pose_classifier.pkl"
Mediapipe:
# Evaluate the MediaPipe RF classifier on the intersection test split.
# Fix: the classifier file name now matches the artifact produced by training
# (--model-name "mediapipe_rf_pose_classifier" uses underscores, not hyphens).
python scripts/evaluate_pose_classifier.py \
  -i "./data/intersection/mediapipe-videos-dataset-test-csv" \
  -o "./metrics/" \
  -f "mediapipe_rf_test_dataset-inter" \
  --pose-classifier "./models/mediapipe_rf_pose_classifier.pkl"
Yolo:
# Evaluate the YOLOv8 RF classifier on the intersection test split.
# Fix: the classifier file name now matches the artifact produced by training
# (--model-name "yolo_rf_pose_classifier" uses underscores, not hyphens).
python scripts/evaluate_pose_classifier.py \
  -i "./data/intersection/yolo-videos-dataset-test-csv" \
  -o "./metrics/" \
  -f "yolo_rf_test_dataset-inter" \
  --pose-classifier "./models/yolo_rf_pose_classifier.pkl"
Movenet:
# Evaluate the MoveNet KNN classifier on the personal test split.
python scripts/evaluate_pose_classifier.py \
  -i './data/movenet-videos-dataset-test-csv/test' \
  -o './metrics/' \
  -f 'movenet_knn_personal_dataset' \
  --pose-classifier './models/movenet_knn_pose_classifier.pkl'
Mediapipe:
# Evaluate the MediaPipe KNN classifier on the personal test split.
python scripts/evaluate_pose_classifier.py \
  -i './data/mediapipe-videos-dataset-test-csv/test' \
  -o './metrics/' \
  -f 'mediapipe_knn_personal_dataset' \
  --pose-classifier './models/mediapipe_knn_pose_classifier.pkl'
Yolo:
# Evaluate the YOLOv8 KNN classifier on the personal test split.
python scripts/evaluate_pose_classifier.py \
  -i './data/yolo-videos-dataset-test-csv/test' \
  -o './metrics/' \
  -f 'yolo_knn_personal_dataset' \
  --pose-classifier './models/yolo_knn_pose_classifier.pkl'
Movenet:
# Fall-detection inference on a video file using MoveNet + the RF classifier.
python scripts/video_fall_inference.py \
  -i './data/videos/uri.mp4' \
  -o './data/videos/movenet_uri_out.mp4' \
  --pose-model-name 'movenet' \
  --movenet-version 'movenet_thunder' \
  --pose-classifier './models/movenet_rf_pose_classifier.pkl'
Mediapipe:
# Fall-detection inference on a video file using MediaPipe + the RF classifier.
python scripts/video_fall_inference.py \
  -i './data/videos/uri.mp4' \
  -o './data/videos/mediapipe_uri_out.mp4' \
  --pose-model-name 'mediapipe' \
  --pose-classifier './models/mediapipe_rf_pose_classifier.pkl'
Yolo:
# Fall-detection inference on a video file using YOLOv8 + the RF classifier.
# Fix: the classifier file name now matches the artifact produced by training
# (--model-name "yolo_rf_pose_classifier" uses underscores, not hyphens).
python scripts/video_fall_inference.py \
  -i "./data/videos/demos/03-demo.mp4" \
  -o "./data/videos/demos/03-demo_yolo_inference.mp4" \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt" \
  --pose-classifier "./models/yolo_rf_pose_classifier.pkl"
Movenet:
# Live webcam fall detection using MoveNet + the RF classifier.
# Fix: the classifier file name now matches the artifact produced by training
# (underscores, not hyphens).
python scripts/webcam_fall_inference.py \
  --pose-model-name "movenet" \
  --movenet-version "movenet_thunder" \
  --pose-classifier "./models/movenet_rf_pose_classifier.pkl"
Mediapipe:
# Live webcam fall detection using MediaPipe + the RF classifier.
# Fix: the classifier file name now matches the artifact produced by training
# (underscores, not hyphens).
python scripts/webcam_fall_inference.py \
  --pose-model-name "mediapipe" \
  --pose-classifier "./models/mediapipe_rf_pose_classifier.pkl"
Yolo:
# Live webcam fall detection using YOLOv8 + the RF classifier.
# Fix: the classifier file name now matches the artifact produced by training
# (underscores, not hyphens).
python scripts/webcam_fall_inference.py \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt" \
  --pose-classifier "./models/yolo_rf_pose_classifier.pkl"
Movenet:
# Full fall-detection pipeline (object detection + pose + classifier) on a
# video, using MoveNet.
# Fix: the classifier file name now matches the artifact produced by training
# (underscores, not hyphens).
python scripts/video_fall_pipeline.py \
  -i "./data/videos/demos/01-demo.mp4" \
  -o "./data/videos/demos/01-demo_movenet_out.mp4" \
  --pose-model-name "movenet" \
  --movenet-version "movenet_thunder" \
  --yolo-object-model-path "./models/yolov8n.pt" \
  --pose-classifier "./models/movenet_rf_pose_classifier.pkl"
Mediapipe:
# Full fall-detection pipeline (object detection + pose + classifier) on a
# video, using MediaPipe.
# Fixes copy-paste errors from the MoveNet example: the output file and the
# pose classifier now reference the mediapipe artifacts (classifier name also
# matches the trained --model-name, with underscores).
python scripts/video_fall_pipeline.py \
  -i "./data/videos/demos/01-demo.mp4" \
  -o "./data/videos/demos/01-demo_mediapipe_out.mp4" \
  --pose-model-name "mediapipe" \
  --yolo-object-model-path "./models/yolov8n.pt" \
  --pose-classifier "./models/mediapipe_rf_pose_classifier.pkl"
Yolo:
# Full fall-detection pipeline (object detection + pose + classifier) on a
# video, using YOLOv8 pose.
# Fix: the classifier file name now matches the artifact produced by training
# (underscores, not hyphens).
python scripts/video_fall_pipeline.py \
  -i "./data/videos/demos/03-demo.mp4" \
  -o "./data/videos/demos/03-demo_yolo_out.mp4" \
  --pose-model-name "yolo" \
  --yolo-pose-model-path "./models/yolov8n-pose.pt" \
  --yolo-object-model-path "./models/yolov8n.pt" \
  --pose-classifier "./models/yolo_rf_pose_classifier.pkl"
- Convert dataset to folder dataset (Optional)
- Generate landmarks dataset
- Train pose classifier
- Evaluate pose classifier
- Run pipeline inference on example image/video/webcam