diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ed8ebf5 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +__pycache__ \ No newline at end of file diff --git a/README.md b/README.md index 65a0696..f615b5c 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,116 @@ # Video-heatmap -Script for heatmap generation from single video with static camera. + +## Table of contents + +* [Description](#description) +* [Getting Started](#getting-started) +* [Usage](#usage) +* [Example output](#output) + +## Description + +Script for heatmap generation (for movement) from single video with static camera. + +**Program pipeline:** + +1. Load frame +2. Substract background +3. Accumulate movement +4. Normalize accumulated frame (for view only) +5. Apply heatmap colormap +6. Optional: Supervise with original frame (check --alpha parameter) +7. Save new frame + +**Program structure:** + +* video_heatmap.py - main program with whole pipeline implemented +* vision.py - image operations module +* arguments_parser.py - manage script arguments + +## Getting Started + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1J10ZQep7UX2BU3MnaWLJbwfe9nBuq9lP?usp=sharing) + +### Quick start + +Tested with python 3.8.3. + +Libraries used: [opencv-python](https://github.com/opencv/opencv-python). (Tested on ver. 4.4.0)\ +You can install it using pip. + +```bash +pip install -r requirements.txt +``` + +Check if script works and show help + +```bash +python video_heatmap.py -h +``` + +## Usage + +```bash +video_heatmap.py [-h] -f VIDEO_FILE [-a VIDEO_ALPHA] [-d] [-o VIDEO_OUTPUT] [-s VIDEO_SKIP] [-t TAKE_EVERY] + +optional arguments: + -h, --help show this help message and exit + -f VIDEO_FILE, --file VIDEO_FILE + Video file path for which heatman will be created. Example: input.mp4 + -a VIDEO_ALPHA, --alpha VIDEO_ALPHA + Optional parameter to adjust alpha between heatmap and original video frames. 
+ Value between 0.0-1.0 represent what part of original video heatmap gonna take. + Default: 0.9 + -d, --disable Disable live view of heatmap generation. + -o VIDEO_OUTPUT, --output VIDEO_OUTPUT + Adjust name of output files. Script creates two files one video .mp4 and one image .png. + Default: output + -s VIDEO_SKIP, --skip VIDEO_SKIP + Skip first number of frames in order to warm up background substraction alghoritm. + Default: 200 frames + -t TAKE_EVERY, --take-every TAKE_EVERY + In order to speed up process it is possible to skip frames and take every x frame. + Default: 1 (take all frames). +``` + +Example usage: + +```bash +python video_heatmap.py -f input.mp4 -o output_name +``` + +If u want to **DISABLE LIVE VIEW** of process use -d flag + +```bash +python video_heatmap.py -f input.mp4 -o output_name -d +``` + +If u want to check **SPEED UP PROCESS** you can adjust -t flag to take every x frame. \ +Example: Take every third frame + +```bash +python video_heatmap.py -f input.mp4 -o output_name -t 3 +``` + +It is good idea to warm up background substraction alghoritm and skip first x frames. \ +If u want to do it **ADJUST NUMBER OF WARMUP FRAMES** use -s flag + +```bash +python video_heatmap.py -f input.mp4 -o output_name -s 100 +``` + +If u want to **CHANGE ALPHA BETWEEN ORIGINAL AND HEATMAP** use -a flag + +```bash +python video_heatmap.py -f input.mp4 -o output_name -a 0.5 +``` + +## Example output + +Original video: + +[![Original video](https://img.youtube.com/vi/MNn9qKG2UFI/hqdefault.jpg)](https://youtu.be/MNn9qKG2UFI) + +Output of script: + +[![Output video](https://img.youtube.com/vi/UhYFcNcXlvs/hqdefault.jpg)](https://youtu.be/UhYFcNcXlvs) diff --git a/arguments_parser.py b/arguments_parser.py new file mode 100644 index 0000000..6831dbd --- /dev/null +++ b/arguments_parser.py @@ -0,0 +1,48 @@ +""" +Module for preparing arguments parser. 
+""" + +import argparse + +def prepare_parser(): + """Prepare parser with arguments and return it.""" + + parser = argparse.ArgumentParser() + + # File path + parser.add_argument("-f", "--file", action="store", dest="video_file", + help="Video file path for which heatman will \ + be created. Example: input.mp4", + required=True) + + # Alpha between original frame and heatmap + parser.add_argument("-a", "--alpha", action="store", dest="video_alpha", + help="Optional parameter to adjust alpha between \ + heatmap and original video frames. Value between \ + 0.0-1.0 represent what part of original video heatmap \ + gonna take. Default: 0.9", + required=False, default=0.9) + + # Disable live view + parser.add_argument("-d", "--disable", action="store_true", dest="video_disable", + help="Disable live view of heatmap generation.", + required=False, default=False) + + # Output name + parser.add_argument("-o", "--output", action="store", dest="video_output", + help="Adjust name of output files. Script creates two files \ + one video .mp4 and one image .png. Default: output", + required=False, default="output") + + # Skip first frames + parser.add_argument("-s", "--skip", action="store", dest="video_skip", + help="Skip first number of frames in order to warm up background \ + substraction alghoritm. Default: 200 frames", + required=False, default=200) + + # Take every x frame + parser.add_argument("-t", "--take-every", action="store", dest="take_every", + help="In order to speed up process it is possible to skip frames and \ + take every x frame. 
Default: 1 (take all frames).", + required=False, default=1) + return parser diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..98d6794 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +opencv-python==4.4.0 \ No newline at end of file diff --git a/video_heatmap.py b/video_heatmap.py new file mode 100644 index 0000000..565f28a --- /dev/null +++ b/video_heatmap.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +"""Script for heatmap generation from single video with static camera. + +Program pipeline: + 1. Load frame + 2. Substract background + 3. Accumulate movement + 4. Normalize accumulated frame (for view only) + 5. Apply heatmap colormap + 6. Optional: Supervise with original frame (check --alpha parameter) + 7. Save new frame +""" + +# pylint: disable=no-member + +import cv2 +import numpy as np + +import vision +import arguments_parser + +def main(): + """ + Whole heatmap pipeline creation. + """ + parser = arguments_parser.prepare_parser() + args = parser.parse_args() + + capture = cv2.VideoCapture(args.video_file) + background_subtractor = cv2.createBackgroundSubtractorKNN() + + read_succes, video_frame = capture.read() + + height, width, _ = video_frame.shape + frames_number = capture.get(cv2.CAP_PROP_FRAME_COUNT) + fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v") + video = cv2.VideoWriter(args.video_output + ".mp4", fourcc, 30.0, (width, height)) + accumulated_image = np.zeros((height, width), np.uint8) + + count = 0 + + while read_succes: + read_succes, video_frame = capture.read() + if read_succes: + background_filter = background_subtractor.apply(video_frame) + if count > args.video_skip and count % args.take_every == 0: + + erodated_image = vision.apply_morph(background_filter, + morph_type=cv2.MORPH_ERODE, + kernel_size=(5,5)) + accumulated_image = vision.add_images(accumulated_image, erodated_image) + normalized_image = vision.normalize_image(accumulated_image) + heatmap_image = 
def add_images(image1: np.ndarray,
               image2: np.ndarray) -> np.ndarray:
    """
    Sum two images element-wise without clipping at the 8-bit ceiling.

    Note:
        Both inputs are widened to np.uint64 before the addition, so
        per-pixel sums are free to exceed 255. Normalize the result
        (see normalize_image) before trying to display it.

    Args:
        image1 (np.ndarray): First image to add.
        image2 (np.ndarray): Second image to add.
    Returns:
        np.ndarray: Element-wise sum of image1 and image2 (dtype uint64).
    """
    widened_first = np.asarray(image1, dtype=np.uint64)
    widened_second = np.asarray(image2, dtype=np.uint64)
    return widened_first + widened_second