diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..abc015b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,50 @@
+# Custom files
+upload.sh
+w_*
+replace
+*ttf
+fonts
+cc
+
+# IDE files
+.idea
+.vscode
+.vscode-test/
+.vscodeignore
+
+# Go
+vendor
+go.sum
+*.exe
+
+# Python
+venv
+__pycache__
+build
+dist
+*egg-info
+
+# JavaScript / TypeScript
+out
+node_modules
+*.vsix
+*.lock
+.yarnrc
+
+# Log files
+*.log
+logs
+
+# Storage files
+uploads
+storage
+*.db
+testdata/*
+!testdata/*.py
+!testdata/*.sh
+_gsdata_
+
+# Dropbox
+*.paper
+
+.gitignore
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..a5b1004
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,41 @@
+# This is a contributed example of how to build ffmpeg-gl-transitions using Docker.
+# If you use Docker, this should get the job done;
+# if you don't use Docker, you can still run the same commands
+# manually and get the same result.
+
+# docker build -t rustlekarl/ffmpeg-generator:latest .
+FROM rustlekarl/ffmpeg-gltransition:latest
+
+MAINTAINER rustlekarl "rustlekarl@gmail.com"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list
+RUN echo "[global]\nindex-url=http://mirrors.aliyun.com/pypi/simple/\n[install]\ntrusted-host=mirrors.aliyun.com" > /etc/pip.conf
+
+RUN apt-get update \
+    && apt-get install -y python3-pip python3-dev \
+    && cd /usr/local/bin \
+    && ln -s /usr/bin/python3 python \
+    && ln -s /usr/bin/pip3 pip \
+    && pip3 --no-cache-dir install --upgrade pip
+
+WORKDIR /root
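+
+# requirements.txt is copied on its own first so Docker caches the pip-install
+# layer below; it only re-runs when the requirements change, not on every source edit.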
+COPY requirements.txt .
+RUN pip install -r requirements.txt
+
+WORKDIR /generator
+
+COPY . /generator
+
+RUN (cd /generator; python run_examples.py)
+
+RUN rm -rf /generator/* && rm -rf /var/lib/apt/lists/* && apt-get -y purge
+
+# Override the parent image's ENTRYPOINT
+RUN echo "#!/bin/bash\nXvfb -ac :1 -screen 0 1280x1024x16 > /dev/null 2>&1" > /entrypoint.sh
+
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..5369e32
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2021 Rustle Karl
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e4c817d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,37 @@
+.PHONY: ;
+.SILENT: ; # no need for @
+.ONESHELL: ; # recipes execute in same shell
+.NOTPARALLEL: ; # wait for target to finish
+.EXPORT_ALL_VARIABLES: ; # send all vars to shell
+
+VERSION = 1.0.0
+PACKAGE = ffmpeg-generator
+
+# While console windows in Windows 10 do support VT (Virtual Terminal) / ANSI
+# escape sequences in principle, support is turned OFF by default.
+# Set-ItemProperty HKCU:\Console VirtualTerminalLevel -Type DWORD 1
+# reg add HKCU\Console /v VirtualTerminalLevel /t REG_DWORD /d 1
+
+all: dep
+
+dep:
+	pip install twine
+	pip install -r requirements.txt
+
+setup: dep
+	python setup.py sdist
+	python setup.py bdist_wheel
+	pip install dist/$(PACKAGE)-$(VERSION).tar.gz
+
+uninstall:
+	pip uninstall -y $(PACKAGE)
+
+upload: setup
+	twine upload dist/$(PACKAGE)-$(VERSION).tar.gz
+
+docker-build:
+	docker build -t rustlekarl/ffmpeg-generator:latest .
+
+docker-exec:
+	docker-compose up -d
+	docker exec -it ffmpeg-generator_ffmpeg_1 bash
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ef00372
--- /dev/null
+++ b/README.md
@@ -0,0 +1,460 @@
+
+
+# FFmpeg Generator
+
+> An FFmpeg command generator and executor.
+
+Python bindings for FFmpeg, with support for almost all filters, including the `gltransition` filter.
+
+- [FFmpeg Generator](#ffmpeg-generator)
+  - [Overview](#overview)
+  - [Installation](#installation)
+  - [Documents](#documents)
+  - [GLTransition Filter](#gltransition-filter)
+  - [Video Sources](#video-sources)
+    - [Play by FFplay](#play-by-ffplay)
+    - [Preview by FFmpeg](#preview-by-ffmpeg)
+    - [Save Video from Video Sources](#save-video-from-video-sources)
+  - [More Examples](#more-examples)
+    - [Get Stream Info](#get-stream-info)
+    - [Play a Video](#play-a-video)
+    - [Generate Thumbnail for Video](#generate-thumbnail-for-video)
+    - [Convert Video to Numpy Array](#convert-video-to-numpy-array)
+    - [Read Single Video Frame as JPEG](#read-single-video-frame-as-jpeg)
+    - [Convert Sound to Raw PCM Audio](#convert-sound-to-raw-pcm-audio)
+    - [Assemble Video from Sequence of Frames](#assemble-video-from-sequence-of-frames)
+    - [Audio/Video Pipeline](#audiovideo-pipeline)
+    - [Mono to Stereo with Offsets and Video](#mono-to-stereo-with-offsets-and-video)
+    - [Process Frames](#process-frames)
+    - [FaceTime Webcam Input](#facetime-webcam-input)
+    - [Stream from a Local Video to HTTP Server](#stream-from-a-local-video-to-http-server)
+    - [Stream from RTSP Server to TCP Socket](#stream-from-rtsp-server-to-tcp-socket)
+  - [Special Thanks](#special-thanks)
+
+## Overview
+
+This project is based on [`ffmpeg-python`](https://github.com/kkroening/ffmpeg-python), but has been completely rewritten.
+
+- supports video sources
+- supports almost all filters
+- supports FFplay & FFprobe
+- enables CUDA hwaccel by default
+
+## Installation
+
+```shell
+pip install -U ffmpeg-generator
+```
+
+## Documents
+
+FFmpeg comes with more than 450 audio and video media filters.
+It is recommended to read the official documentation.
+
+- [FFmpeg Homepage](https://ffmpeg.org/)
+- [FFmpeg Documentation](https://ffmpeg.org/ffmpeg.html)
+- [FFmpeg Filters Documentation](https://ffmpeg.org/ffmpeg-filters.html)
+
+Or read my study notes, which aim to demonstrate all the filters. They are written in Chinese and not complete yet.
+
+- [All Examples for Audio Filters](docs/afilters.md)
+- [All Examples for Video Filters](docs/vfilters.md)
+- [All Examples for Audio/Video Sources](docs/sources.md)
+- [All Examples for Media Filters](docs/mfilters.md)
+- [Introduction to FFplay Usage](docs/ffplay.md)
+- [More Notes](https://github.com/studying-notes/ffmpeg-notes)
+
+## GLTransition Filter
+
+```python
+from ffmpeg import avfilters, input, transitions, vfilters, vtools
+from ffmpeg.transitions import GLTransition, GLTransitionAll
+from tests import data
+
+# OpenGL Transition
+
+"""Combine two videos with transition effects."""
+
+# the "transitions" module is imported above so the eval() below can resolve names
+for e in GLTransitionAll:
+    vtools.concat_2_videos_with_gltransition(data.TEST_OUTPUTS_DIR / (e + ".mp4"),
+                                             data.SHORT0, data.SHORT1, offset=1,
+                                             duration=2, source=eval("transitions."
+ e)) + +"""Combine multiple videos with transition effects.""" + +in0 = input(data.SHORT0).video +in1 = input(data.SHORT1).video +in2 = input(data.SHORT2).video + +in0_split = in0.split() +in0_0, in0_1 = in0_split[0], in0_split[1] +in0_0 = in0_0.trim(start=0, end=3) +in0_1 = in0_1.trim(start=3, end=4).setpts() + +in1_split = in1.split() +in1_0, in1_1 = in1_split[0], in1_split[1] +in1_0 = in1_0.trim(start=0, end=3) +in1_1 = in1_1.trim(start=3, end=4).setpts() + +in2_split = in2.split() +in2_0, in2_1 = in2_split[0], in2_split[1] +in2_0 = in2_0.trim(start=0, end=3) +in2_1 = in2_1.trim(start=3, end=4).setpts() + +gl0_1 = vfilters.gltransition(in0_1, in1_0, source=GLTransition.Angular) +gl1_2 = vfilters.gltransition(in1_1, in2_0, source=GLTransition.ButterflyWaveScrawler) + +# transition +_ = avfilters.concat(in0_0, gl0_1, gl1_2, in2_1).output( + data.TEST_OUTPUTS_DIR / "3_transition.mp4", + vcodec="libx264", + v_profile="baseline", + preset="slow", + movflags="faststart", + pixel_format="yuv420p", +).run() + +# transition + image watermark +v_input = avfilters.concat(in0_0, gl0_1, gl1_2, in2_1) +i_input = input(data.I1).scale(w=100, h=100) +v_input.overlay(i_input, x=30, y=30).output( + data.TEST_OUTPUTS_DIR / "3_transition_image.mp4", + vcodec="libx264", + v_profile="baseline", + preset="slow", + movflags="faststart", + pixel_format="yuv420p", +).run() + +# transition + image watermark + text watermark +v_input = avfilters.concat(in0_0, gl0_1, gl1_2, in2_1). \ + drawtext(text="Watermark", x=150, y=150, fontsize=36, fontfile=data.FONT1) +i_input = input(data.I1).scale(w=100, h=100) +v_input.overlay(i_input, x=30, y=30).output( + data.TEST_OUTPUTS_DIR / "3_transition_image_text.mp4", + vcodec="libx264", + v_profile="baseline", + preset="slow", + movflags="faststart", + pixel_format="yuv420p", +).run() + +# transition + image watermark + text watermark + music +v_input = avfilters.concat(in0_0, gl0_1, gl1_2, in2_1). 
\
+    drawtext(text="Watermark", x=150, y=150, fontsize=36, fontfile=data.FONT1)
+i_input = input(data.I1).scale(w=100, h=100)
+a_input = input(data.A1).audio
+v_input.overlay(i_input, x=30, y=30).output(
+    a_input,
+    data.TEST_OUTPUTS_DIR / "3_transition_image_text_music.mp4",
+    acodec="copy",
+    vcodec="libx264",
+    v_profile="baseline",
+    shortest=True,
+    preset="slow",
+    movflags="faststart",
+    pixel_format="yuv420p",
+).run()
+```
+
+## Video Sources
+
+### Play by FFplay
+
+```python
+from ffmpeg import run_ffplay
+
+_ = run_ffplay("allrgb", f="lavfi")
+_ = run_ffplay("allyuv", f="lavfi")
+_ = run_ffplay("color=c=red@0.2:s=1600x900:r=10", f="lavfi")
+_ = run_ffplay("haldclutsrc", f="lavfi")
+_ = run_ffplay("pal75bars", f="lavfi")
+_ = run_ffplay("rgbtestsrc=size=900x600:rate=60", f="lavfi")
+_ = run_ffplay("smptebars=size=900x600:rate=60", f="lavfi")
+_ = run_ffplay("smptehdbars=size=900x600:rate=60", f="lavfi")
+_ = run_ffplay("testsrc=size=900x600:rate=60", f="lavfi")
+_ = run_ffplay("testsrc2=s=900x600:rate=60", f="lavfi")
+_ = run_ffplay("yuvtestsrc=s=900x600:rate=60", f="lavfi")
+```
+
+### Preview by FFmpeg
+
+```python
+from ffmpeg import input_source
+
+_ = input_source("testsrc", size="900x600", rate=60).output(preview=True).run_async()
+_ = input_source("testsrc2", size="900x600", rate=60).output(preview=True).run_async()
+```
+
+### Save Video from Video Sources
+
+```python
+from ffmpeg import input_source
+
+_ = input_source("testsrc", size="900x600", rate=60, duration=30).output("source_testsrc.mp4").run()
+```
+
+## More Examples
+
+### Get Stream Info
+
+```python
+from ffmpeg import FFprobe
+
+meta = FFprobe("path/to/file")
+
+# all streams
+print(meta.metadata)
+
+# video stream
+print(meta.video)
+print(meta.video_duration)
+print(meta.video_scale)
+
+# audio stream
+print(meta.audio)
+print(meta.audio_duration)
+```
+
+### Play a Video
+
+```python
+from ffmpeg import ffplay_video
+from tests import data
+
+ffplay_video(data.V1, vf='transpose=1')
+ffplay_video(data.V1, vf='hflip')
+ffplay_video(data.V1, af='atempo=2')
+ffplay_video(data.V1, vf='setpts=PTS/2')
+ffplay_video(data.V1, vf='transpose=1,setpts=PTS/2', af='atempo=2')
+```
+
+### Generate Thumbnail for Video
+
+```python
+from ffmpeg import vtools
+
+vtools.generate_video_thumbnail(src="src", dst="dst", start_position=3, width=400, height=-1)
+```
+
+### Convert Video to Numpy Array
+
+```python
+from ffmpeg import vtools
+
+vtools.convert_video_to_np_array(src="src")
+```
+
+### Read Single Video Frame as JPEG
+
+```python
+from ffmpeg import vtools
+
+vtools.read_frame_as_jpeg(src="src", frame=10)
+```
+
+### Convert Sound to Raw PCM Audio
+
+```python
+from ffmpeg import atools
+
+atools.convert_audio_to_raw_pcm(src="src")
+```
+
+### Assemble Video from Sequence of Frames
+
+```python
+from ffmpeg import vtools
+
+# on Linux
+vtools.assemble_video_from_images('/path/to/jpegs/*.jpg', pattern_type='glob', frame_rate=25)
+
+# on Windows
+vtools.assemble_video_from_images('/path/to/jpegs/%02d.jpg', pattern_type=None, frame_rate=25)
+```
+
+> https://stackoverflow.com/questions/31201164/ffmpeg-error-pattern-type-glob-was-selected-but-globbing-is-not-support-ed-by
+
+With additional filtering:
+
+```python
+import ffmpeg
+
+ffmpeg.input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25). \
+    filter('deflicker', mode='pm', size=10). \
+    filter('scale', size='hd1080', force_original_aspect_ratio='increase').
\ + output('movie.mp4', crf=20, preset='slower', movflags='faststart', pix_fmt='yuv420p'). \ + view(save_path='filter_graph').run() +``` + +### Audio/Video Pipeline + +```python +import ffmpeg +from ffmpeg import avfilters + +in1 = ffmpeg.input("input.mp4") +in2 = ffmpeg.input("input.mp4") + +v1 = in1.video.hflip() +a1 = in2.audio + +v2 = in2.video.reverse().hue(s=0) +a2 = in2.audio.areverse().aphaser() + +joined = avfilters.concat(v1, a1, v2, a2, v=1, a=1).Node + +v3 = joined[0] +a3 = joined[1].volume(0.8) + +v3.output(a3, 'v1_v2_pipeline.mp4').run() +``` + +### Mono to Stereo with Offsets and Video + +```python +import ffmpeg +from ffmpeg import afilters +from tests import data + +input_video = ffmpeg.input(data.V1) +audio_left = ffmpeg.input(data.A1).atrim(start=15).asetpts("PTS-STARTPTS") +audio_right = ffmpeg.input(data.A1).atrim(start=10).asetpts("PTS-STARTPTS") + +afilters.join(audio_left, audio_right, inputs=2, channel_layout="stereo"). \ + output(input_video.video, "stereo_video.mp4", shortest=None, vcodec="copy").run() +``` + +### Process Frames + +- Decode input video with ffmpeg +- Process each video frame with python +- Encode output video with ffmpeg + +```python +import subprocess + +import numpy as np + +from ffmpeg import constants, FFprobe, input, settings +from tests import data + +settings.CUDA_ENABLE = False + + +def ffmpeg_input_process(src): + return input(src).output(constants.PIPE, format="rawvideo", + pixel_format="rgb24").run_async(pipe_stdout=True) + + +def ffmpeg_output_process(dst, width, height): + return input(constants.PIPE, format="rawvideo", pixel_format="rgb24", + width=width, height=height).output(dst, pixel_format="yuv420p"). \ + run_async(pipe_stdin=True) + + +def read_frame_from_stdout(process: subprocess.Popen, width, height): + frame_size = width * height * 3 + input_bytes = process.stdout.read(frame_size) + + if not input_bytes: + return + + assert len(input_bytes) == frame_size + + return np.frombuffer(input_bytes, np.uint8).reshape([height, width, 3]) + + +def process_frame_simple(frame): + # deep dream + return frame * 0.3 + + +def write_frame_to_stdin(process: subprocess.Popen, frame): + process.stdin.write(frame.astype(np.uint8).tobytes()) + + +def run(src, dst, process_frame): + width, height = FFprobe(src).video_scale + + input_process = ffmpeg_input_process(src) + output_process = ffmpeg_output_process(dst, width, height) + + while True: + input_frame = read_frame_from_stdout(input_process, width, height) + + if input_frame is None: + break + + write_frame_to_stdin(output_process, process_frame(input_frame)) + + input_process.wait() + + output_process.stdin.close() + output_process.wait() + + +if __name__ == '__main__': + run(data.SHORT0, data.TEST_OUTPUTS_DIR / "process_frame.mp4", process_frame_simple) +``` + +### FaceTime Webcam Input + +```python +import ffmpeg + +def facetime(): + ffmpeg.input("FaceTime", format="avfoundation", + pixel_format="uyvy422", framerate=30). 
\
+        output("facetime.mp4", pixel_format="yuv420p", frame_size=100).run()
+```
+
+### Stream from a Local Video to HTTP Server
+
+```python
+from ffmpeg import input
+
+# the global "-re" flag makes FFmpeg read the input at its native frame
+# rate, so the file is served like a live stream
+input("video.mp4").output("http://127.0.0.1:8080",
+                          codec="copy",  # use the same codecs as the original video
+                          listen=1,  # enable FFmpeg's built-in HTTP server
+                          f="flv").\
+    with_global_args("-re").\
+    run()
+```
+
+To receive the video you can use ffplay in the terminal:
+
+```shell
+ffplay -f flv http://localhost:8080
+```
+
+### Stream from RTSP Server to TCP Socket
+
+```python
+import socket
+from ffmpeg import input
+
+# NOTE: connect this socket to a receiving peer before sending packets
+server = socket.socket()
+# fill in the RTSP server host for %s
+process = input('rtsp://%s:8554/default').\
+    output('-', format='h264').\
+    run_async(pipe_stdout=True)
+
+while process.poll() is None:
+    packet = process.stdout.read(4096)
+    try:
+        server.send(packet)
+    except socket.error:
+        process.stdout.close()
+        process.wait()
+        break
+```
+
+## Special Thanks
+
+- [The FFmpeg-Python Project](https://github.com/kkroening/ffmpeg-python)
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..57586c9
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3.5'
+
+# docker exec -it ffmpeg-generator_ffmpeg_1 bash
+services:
+
+  ffmpeg:
+    build: .
+    image: rustlekarl/ffmpeg-generator
+    restart: unless-stopped
+    volumes:
+      - ".:/generator"
+
+  nginx-rtmp:
+    image: alqutami/rtmp-hls:latest-alpine
+    restart: unless-stopped
+    ports:
+      - "11935:1935"
+      - "11936:8080"
+    volumes:
+      - "./docker/nginx/conf/nginx_no-ffmpeg.conf:/etc/nginx/nginx.conf"
diff --git a/docker/ffmpeg/cuda.Dockerfile b/docker/ffmpeg/cuda.Dockerfile
new file mode 100644
index 0000000..dcd9baf
--- /dev/null
+++ b/docker/ffmpeg/cuda.Dockerfile
@@ -0,0 +1,188 @@
+# docker build -t rustlekarl/ffmpeg-gpu:latest -t rustlekarl/ffmpeg-gpu:ubuntu20.04-cuda11.2.1 -f cuda.Dockerfile .
+# Docker on Windows does not support CUDA.
+
+FROM nvidia/cuda:11.2.1-base-ubuntu20.04
+
+MAINTAINER rustlekarl
+
+RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list
+
+RUN apt update && apt install -y ffmpeg
+
+WORKDIR /ffmpeg
+
+ARG NGINX_VERSION=1.18.0
+ARG NGINX_RTMP_VERSION=1.2.1
+ARG FFMPEG_VERSION=4.3.1
+
+
+##############################
+# Build the NGINX-build image.
+FROM ubuntu:18.04 as build-nginx
+ARG NGINX_VERSION
+ARG NGINX_RTMP_VERSION
+
+# Build dependencies.
+RUN apt update && apt install -y \
+    build-essential \
+    cmake \
+    ca-certificates \
+    curl \
+    gcc \
+    libc-dev \
+    make \
+    musl-dev \
+    openssl \
+    libssl-dev \
+    libpcre3 \
+    libpcre3-dev \
+    pkg-config \
+    zlib1g-dev \
+    wget
+
+# Get nginx source.
+RUN cd /tmp && \ + wget https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz && \ + tar zxf nginx-${NGINX_VERSION}.tar.gz && \ + rm nginx-${NGINX_VERSION}.tar.gz + +# Get nginx-rtmp module. +RUN cd /tmp && \ + wget https://github.com/arut/nginx-rtmp-module/archive/v${NGINX_RTMP_VERSION}.tar.gz && \ + tar zxf v${NGINX_RTMP_VERSION}.tar.gz && rm v${NGINX_RTMP_VERSION}.tar.gz + +# Compile nginx with nginx-rtmp module. +RUN cd /tmp/nginx-${NGINX_VERSION} && \ + ./configure \ + --prefix=/usr/local/nginx \ + --add-module=/tmp/nginx-rtmp-module-${NGINX_RTMP_VERSION} \ + --conf-path=/etc/nginx/nginx.conf \ + --with-threads \ + --with-file-aio \ + --with-http_ssl_module \ + --with-debug \ + --with-cc-opt="-Wimplicit-fallthrough=0" && \ + cd /tmp/nginx-${NGINX_VERSION} && make && make install + +############################### +# Build the FFmpeg-build image. +FROM nvidia/cuda:11.1-devel-ubuntu20.04 as build-ffmpeg + +ENV DEBIAN_FRONTEND=noninteractive +ARG FFMPEG_VERSION +ARG PREFIX=/usr/local +ARG MAKEFLAGS="-j4" + +# FFmpeg build dependencies. +RUN apt update && apt install -y \ + build-essential \ + coreutils \ + cmake \ + libx264-dev \ + libx265-dev \ + libc6 \ + libc6-dev \ + libfreetype6-dev \ + libfdk-aac-dev \ + libmp3lame-dev \ + libogg-dev \ + libass9 \ + libass-dev \ + libnuma1 \ + libnuma-dev \ + libopus-dev \ + librtmp-dev \ + libvpx-dev \ + libvorbis-dev \ + libwebp-dev \ + libtheora-dev \ + libtool \ + libssl-dev \ + pkg-config \ + wget \ + yasm \ + git + +# Clone and install ffnvcodec +RUN cd /tmp && git clone https://git.videolan.org/git/ffmpeg/nv-codec-headers.git && \ + cd nv-codec-headers && make install + +# Get FFmpeg source. +RUN cd /tmp/ && \ + wget http://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.gz && \ + tar zxf ffmpeg-${FFMPEG_VERSION}.tar.gz && rm ffmpeg-${FFMPEG_VERSION}.tar.gz + +# Compile ffmpeg. +RUN cd /tmp/ffmpeg-${FFMPEG_VERSION} && \ + ./configure \ + --prefix=${PREFIX} \ + --enable-version3 \ + --enable-gpl \ + --enable-nonfree \ + --enable-small \ + --enable-libfdk-aac \ + --enable-openssl \ + --enable-libnpp \ + --enable-cuda \ + --enable-cuvid \ + --enable-nvenc \ + --enable-libnpp \ + --disable-debug \ + --disable-doc \ + --disable-ffplay \ + --extra-cflags=-I/usr/local/cuda/include \ + --extra-ldflags=-L/usr/local/cuda/lib64 \ + --extra-libs="-lpthread -lm" && \ + make && make install && make distclean + +# Cleanup. +RUN rm -rf /var/cache/* /tmp/* + +########################## +# Build the release image. +FROM nvidia/cuda:11.1-runtime-ubuntu20.04 +LABEL MAINTAINER Alfred Gutierrez + +ENV DEBIAN_FRONTEND=noninteractive +ENV NVIDIA_DRIVER_VERSION=455 +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,video,utility + +# Set default ports. +ENV HTTP_PORT 80 +ENV HTTPS_PORT 443 +ENV RTMP_PORT 1935 + +# Set default options. +ENV SINGLE_STREAM "" +ENV MAX_MUXING_QUEUE_SIZE "" +ENV ANALYZEDURATION "" + +RUN apt update && apt install -y --no-install-recommends \ + ca-certificates \ + curl \ + gettext \ + libpcre3-dev \ + libnvidia-decode-${NVIDIA_DRIVER_VERSION} \ + libnvidia-encode-${NVIDIA_DRIVER_VERSION} \ + libtheora0 \ + openssl \ + rtmpdump + +COPY --from=build-nginx /usr/local/nginx /usr/local/nginx +COPY --from=build-nginx /etc/nginx /etc/nginx +COPY --from=build-ffmpeg /usr/local /usr/local +COPY --from=build-ffmpeg /usr/lib/x86_64-linux-gnu/libfdk-aac.so.1 /usr/lib/x86_64-linux-gnu/libfdk-aac.so.1 + +# Add NGINX path, config and static files. 
+ENV PATH "${PATH}:/usr/local/nginx/sbin"
+RUN mkdir -p /opt/data && mkdir /www
+ADD nginx-cuda.conf /etc/nginx/nginx.conf.template
+ADD entrypoint.cuda.sh /opt/entrypoint.sh
+RUN chmod gu+x /opt/entrypoint.sh
+ADD static /www/static
+
+EXPOSE 1935
+EXPOSE 80
+
+CMD /opt/entrypoint.sh
diff --git a/docker/ffmpeg/gltransition.Dockerfile b/docker/ffmpeg/gltransition.Dockerfile
new file mode 100644
index 0000000..6505e4e
--- /dev/null
+++ b/docker/ffmpeg/gltransition.Dockerfile
@@ -0,0 +1,130 @@
+# This is a contributed example of how to build ffmpeg-gl-transitions using Docker.
+# If you use Docker, this should get the job done;
+# if you don't use Docker, you can still run the same commands
+# manually and get the same result.
+
+# docker build -t rustlekarl/ffmpeg-gltransition:n4.3.2-20210303 -t rustlekarl/ffmpeg-gltransition:latest -f docker/ffmpeg/gltransition.Dockerfile .
+FROM ubuntu:20.04
+
+MAINTAINER rustlekarl "rustlekarl@gmail.com"
+
+ENV FFMPEG_VERSION "n4.3.2"
+
+# everything is relative to /build
+WORKDIR /build
+
+# enable contrib/non-free
+RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+# use ENV so the timezone persists across layers (a "RUN export" would not)
+ENV TZ=Asia/Shanghai
+
+# update anything needed
+RUN apt-get -y update && apt-get -y upgrade
+
+# needed dependencies
+RUN apt-get -y install git \
+    apt-utils \
+    autoconf \
+    automake \
+    build-essential \
+    cmake \
+    g++ \
+    gcc \
+    git-core \
+    libass-dev \
+    libfdk-aac-dev \
+    libfreetype6-dev \
+    libglew-dev \
+    libglfw3-dev \
+    libglu1-mesa-dev \
+    libgnutls28-dev \
+    libmp3lame-dev \
+    libopus-dev \
+    libsdl2-dev \
+    libtheora-dev \
+    libtool \
+    libva-dev \
+    libvdpau-dev \
+    libvorbis-dev \
+    libvpx-dev \
+    libx264-dev \
+    libx265-dev \
+    libxcb-shm0-dev \
+    libxcb-xfixes0-dev \
+    libxcb1-dev \
+    libxvidcore-dev \
+    make \
+    nasm \
+    pkg-config \
+    texinfo \
+    wget \
+    xorg-dev \
+    yasm \
+    zlib1g-dev \
+    gperf \
+    libglew2.1
+
+# get ffmpeg sources
+RUN (cd /build; git clone -b "$FFMPEG_VERSION" https://gitee.com/fujiawei/FFmpeg.git ffmpeg)
+
+# get ffmpeg-gl-transition modifications
+# this pulls from the original master for standalone use
+# but you could modify to copy from your clone/repository
+RUN (cd /build; git clone https://gitee.com/fujiawei/ffmpeg-gl-transition.git; cd ffmpeg-gl-transition; git clone https://gitee.com/fujiawei/gl-transitions.git; cd /build/ffmpeg; git apply /build/ffmpeg-gl-transition/ffmpeg.diff; grep -v "define GL_TRANSITION_USING_EGL" /build/ffmpeg-gl-transition/vf_gltransition.c > /build/ffmpeg/libavfilter/vf_gltransition.c)
+
+RUN (cd /build; git clone https://gitee.com/fujiawei/libass.git)
+
+RUN (cd /build; git clone
https://gitee.com/fujiawei/mirror.git) + +RUN (cd /build; mv /build/mirror/freetype-2.10.4.tar.xz /build/freetype-2.10.4.tar.xz; tar -xf freetype-2.10.4.tar.xz; cd freetype-2.10.4; ./configure --prefix=/usr --enable-freetype-config --disable-static; make; make install) + +RUN (cd /build; mv /build/mirror/fribidi-1.0.9.tar.xz /build/fribidi-1.0.9.tar.xz; tar -xf fribidi-1.0.9.tar.xz; cd fribidi-1.0.9; ./configure --prefix=/usr; make; make install) + +RUN (cd /build; mv /build/mirror/nasm-2.15.05.tar.xz /build/nasm-2.15.05.tar.xz; tar -xf nasm-2.15.05.tar.xz; cd nasm-2.15.05; ./configure --prefix=/usr; make; make install) + +RUN (cd /build; mv /build/mirror/fontconfig-2.13.1.tar.bz2 /build/fontconfig-2.13.1.tar.bz2; tar -xf fontconfig-2.13.1.tar.bz2; cd fontconfig-2.13.1; rm -f src/fcobjshash.h; ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-docs --docdir=/usr/share/doc/fontconfig-2.13.1; make; make install) + +RUN (cd /build/libass; sh autogen.sh; ./configure --prefix=/usr --disable-static; make; make install) + +ENV PKG_CONFIG_PATH=/usr/local/ass/lib/pkgconfig:$PKG_CONFIG_PATH + +RUN (cd /build; git clone --depth=1 https://gitee.com/fujiawei/SVT-AV1; cd SVT-AV1; cd Build; cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release; make install) + +# RUN (cd /build; git clone https://gitee.com/fujiawei/x264.git; cd x264; ./configure --prefix=/usr --enable-static --enable-shared; make && make install) + +# configure/compile/install ffmpeg +RUN (cd /build/ffmpeg; ./configure --enable-gnutls --enable-gpl --enable-libass --enable-libfdk-aac --enable-libfreetype --enable-libmp3lame --enable-libopus --enable-libtheora --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libx265 --enable-libxvid --enable-nonfree --enable-opengl --enable-filter=gltransition --extra-libs='-lGLEW -lglfw -ldl') + +# the -j speeds up compilation, but if your container host is limited on resources, you may need to remove it to force a non-parallel build to avoid memory usage issues +RUN (cd /build/ffmpeg; make -j && make install) + +# needed for running it +RUN apt-get -y install xvfb + +# try the demo +RUN (cd ffmpeg-gl-transition; ln -s /usr/local/bin/ffmpeg .) +RUN (cd ffmpeg-gl-transition; xvfb-run --auto-servernum -s '+iglx -screen 0 1920x1080x24' bash concat.sh) +# result would be in out.mp4 in that directory + +#COPY testdata /build/testdata +# +#RUN (cd /build/testdata; ln -s /usr/local/bin/ffmpeg .) +#RUN (cd /build/testdata; bash test_drawtext.sh; bash test_libx264.sh) +#RUN (cd /build/testdata; xvfb-run --auto-servernum -s '+iglx -screen 0 1920x1080x24' bash test_gltransition.sh) + +RUN rm -rf /build +RUN rm -rf /var/lib/apt/lists/* && apt-get -y purge + +WORKDIR /root + +# drop you into a shell to look around +# modify as needed for actual use +RUN echo "#!/bin/bash\nnohup Xvfb -ac :1 -screen 0 1280x1024x16 > /dev/null 2>&1 &\n/bin/bash" > /entrypoint.sh + +RUN chmod +x /entrypoint.sh + +ENV DISPLAY=:1 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/ffmpeg/golang.Dockerfile b/docker/ffmpeg/golang.Dockerfile new file mode 100644 index 0000000..85224a0 --- /dev/null +++ b/docker/ffmpeg/golang.Dockerfile @@ -0,0 +1,56 @@ +# docker build -t rustlekarl/ffmpeg-golang:latest -t rustlekarl/ffmpeg-golang:ubuntu-focal -f golang.Dockerfile . 
+ +FROM lsiobase/ffmpeg:bin as binstage +FROM lsiobase/ubuntu:focal + +MAINTAINER rustlekarl + +# Add files from binstage +COPY --from=binstage / / + +ARG DEBIAN_FRONTEND=noninteractive + +# hardware env +ENV \ + LIBVA_DRIVERS_PATH="/usr/lib/x86_64-linux-gnu/dri" \ + NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" \ + NVIDIA_VISIBLE_DEVICES="all" + +ENV TZ="Asia/Shanghai" + +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list + +# update anything needed +RUN apt-get -y update && apt-get -y upgrade + +# need dep +RUN \ + echo "**** install runtime ****" && \ + apt-get install -y \ + i965-va-driver \ + libexpat1 \ + libgl1-mesa-dri \ + libglib2.0-0 \ + libgomp1 \ + libharfbuzz0b \ + libv4l-0 \ + libx11-6 \ + libxcb1 \ + libxext6 \ + libxml2 + +# golang +RUN apt-get -y install golang-go make + +RUN go env -w GOPROXY=https://goproxy.cn,direct && go env -w GOSUMDB=off && go env -w GO111MODULE=on + +RUN \ + echo "**** clean up ****" && \ + rm -rf \ + /var/lib/apt/lists/* \ + /var/tmp/* + +# Set up project directory +WORKDIR "/ffmpeg" + +CMD /bin/bash diff --git a/docker/nginx/conf/nginx-nvenc.conf b/docker/nginx/conf/nginx-nvenc.conf new file mode 100644 index 0000000..4b3360a --- /dev/null +++ b/docker/nginx/conf/nginx-nvenc.conf @@ -0,0 +1,131 @@ +worker_processes auto; +#error_log logs/error.log; + +events { + worker_connections 1024; +} + +# RTMP configuration +rtmp { + server { + listen 1935; # Listen on standard RTMP port + chunk_size 4000; + # ping 30s; + # notify_method get; + + # This application is to accept incoming stream + application live { + live on; # Allows live input + + # for each received stream, transcode for adaptive streaming + # This single ffmpeg command takes the input and transforms + # the source into 4 different streams with different bitrates + # and qualities. # these settings respect the aspect ratio. 
+ exec_push /app/ffmpeg/bin/ffmpeg -async 1 -vsync -1 -hwaccel cuvid -c:v h264_cuvid -i rtmp://localhost:1935/$app/$name + -c:v h264_nvenc -c:a aac -b:v 256k -b:a 64k -vf "scale_npp=480:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_low + -c:v h264_nvenc -c:a aac -b:v 768k -b:a 128k -vf "scale_npp=720:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_mid + -c:v h264_nvenc -c:a aac -b:v 1024k -b:a 128k -vf "scale_npp=960:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_high + -c:v h264_nvenc -c:a aac -b:v 1920k -b:a 128k -vf "scale_npp=1280:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_hd720 + -c copy -f flv rtmp://localhost:1935/show/$name_src; + } + + # This is the HLS application + application show { + live on; # Allows live input from above application + deny play all; # disable consuming the stream from nginx as rtmp + + hls on; # Enable HTTP Live Streaming + hls_fragment 3; + hls_playlist_length 20; + hls_path /mnt/hls/; # hls fragments path + # Instruct clients to adjust resolution according to bandwidth + hls_variant _src BANDWIDTH=4096000; # Source bitrate, source resolution + hls_variant _hd720 BANDWIDTH=2048000; # High bitrate, HD 720p resolution + hls_variant _high BANDWIDTH=1152000; # High bitrate, higher-than-SD resolution + hls_variant _mid BANDWIDTH=448000; # Medium bitrate, SD resolution + hls_variant _low BANDWIDTH=288000; # Low bitrate, sub-SD resolution + + # MPEG-DASH + dash on; + dash_path /mnt/dash/; # dash fragments path + dash_fragment 3; + dash_playlist_length 20; + } + } +} + + +http { + sendfile off; + tcp_nopush on; + directio 512; + # aio on; + + # HTTP server required to serve the player and HLS fragments + server { + listen 8080; + + # Serve HLS fragments + location /hls { + types { + application/vnd.apple.mpegurl m3u8; + video/mp2t ts; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 'Content-Length'; + + # allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # Serve DASH fragments + location /dash { + types { + application/dash+xml mpd; + video/mp4 mp4; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 'Content-Length'; + + # Allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # This URL provides RTMP statistics in XML + location /stat { + rtmp_stat all; + rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet + } + + location /stat.xsl { + # XML stylesheet to view RTMP stats. 
+ root /app/nginx/html; + } + + } +} diff --git a/docker/nginx/conf/nginx.conf b/docker/nginx/conf/nginx.conf new file mode 100644 index 0000000..4affbcd --- /dev/null +++ b/docker/nginx/conf/nginx.conf @@ -0,0 +1,131 @@ +worker_processes auto; +#error_log logs/error.log; + +events { + worker_connections 1024; +} + +# RTMP configuration +rtmp { + server { + listen 1935; # Listen on standard RTMP port + chunk_size 4000; + # ping 30s; + # notify_method get; + + # This application is to accept incoming stream + application live { + live on; # Allows live input + + # for each received stream, transcode for adaptive streaming + # This single ffmpeg command takes the input and transforms + # the source into 4 different streams with different bitrates + # and qualities. # these settings respect the aspect ratio. + exec_push /usr/local/bin/ffmpeg -i rtmp://localhost:1935/$app/$name -async 1 -vsync -1 + -c:v libx264 -c:a aac -b:v 256k -b:a 64k -vf "scale=480:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_low + -c:v libx264 -c:a aac -b:v 768k -b:a 128k -vf "scale=720:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_mid + -c:v libx264 -c:a aac -b:v 1024k -b:a 128k -vf "scale=960:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_high + -c:v libx264 -c:a aac -b:v 1920k -b:a 128k -vf "scale=1280:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_hd720 + -c copy -f flv rtmp://localhost:1935/show/$name_src; + } + + # This is the HLS application + application show { + live on; # Allows live input from above application + deny play all; # disable consuming the stream from nginx as rtmp + + hls on; # Enable HTTP Live Streaming + hls_fragment 3; + hls_playlist_length 20; + hls_path /mnt/hls/; # hls fragments path + # Instruct clients to adjust resolution according to bandwidth + hls_variant _src BANDWIDTH=4096000; # Source bitrate, source resolution + hls_variant _hd720 BANDWIDTH=2048000; # High bitrate, HD 720p resolution + hls_variant _high BANDWIDTH=1152000; # High bitrate, higher-than-SD resolution + hls_variant _mid BANDWIDTH=448000; # Medium bitrate, SD resolution + hls_variant _low BANDWIDTH=288000; # Low bitrate, sub-SD resolution + + # MPEG-DASH + dash on; + dash_path /mnt/dash/; # dash fragments path + dash_fragment 3; + dash_playlist_length 20; + } + } +} + + +http { + sendfile off; + tcp_nopush on; + directio 512; + # aio on; + + # HTTP server required to serve the player and HLS fragments + server { + listen 8080; + + # Serve HLS fragments + location /hls { + types { + application/vnd.apple.mpegurl m3u8; + video/mp2t ts; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 'Content-Length'; + + # allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # Serve DASH fragments + location /dash { + types { + application/dash+xml mpd; + video/mp4 mp4; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 
'Content-Length'; + + # Allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # This URL provides RTMP statistics in XML + location /stat { + rtmp_stat all; + rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet + } + + location /stat.xsl { + # XML stylesheet to view RTMP stats. + root /usr/local/nginx/html; + } + + } +} \ No newline at end of file diff --git a/docker/nginx/conf/nginx_no-ffmpeg.conf b/docker/nginx/conf/nginx_no-ffmpeg.conf new file mode 100644 index 0000000..e59112e --- /dev/null +++ b/docker/nginx/conf/nginx_no-ffmpeg.conf @@ -0,0 +1,115 @@ +worker_processes auto; +#error_log logs/error.log; + +events { + worker_connections 1024; +} + +# RTMP configuration +rtmp { + server { + listen 1935; # Listen on standard RTMP port + chunk_size 4000; + # ping 30s; + # notify_method get; + + # This application is to accept incoming stream + application live { + live on; # Allows live input + push rtmp://localhost:1935/show; + } + + # This is the HLS application + application show { + live on; # Allows live input from above application + deny play all; # disable consuming the stream from nginx as rtmp + + hls on; # Enable HTTP Live Streaming + hls_fragment 3; + hls_playlist_length 10; + hls_path /mnt/hls/; # hls fragments path + + # MPEG-DASH + dash on; + dash_path /mnt/dash/; # dash fragments path + dash_fragment 3; + dash_playlist_length 10; + } + } +} + + +http { + sendfile off; + tcp_nopush on; + directio 512; + # aio on; + + # HTTP server required to serve the player and HLS fragments + server { + listen 8080; + + # Serve HLS fragments + location /hls { + types { + application/vnd.apple.mpegurl m3u8; + video/mp2t ts; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 'Content-Length'; + + # allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # Serve DASH fragments + location /dash { + types { + application/dash+xml mpd; + video/mp4 mp4; + } + + root /mnt; + + add_header Cache-Control no-cache; # Disable cache + + + # CORS setup + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Expose-Headers' 'Content-Length'; + + # Allow CORS preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + + # This URL provides RTMP statistics in XML + location /stat { + rtmp_stat all; + rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet + } + + location /stat.xsl { + # XML stylesheet to view RTMP stats. 
+ root /usr/local/nginx/html; + } + + } +} \ No newline at end of file diff --git a/docker/nginx/conf/nginx_rtmp_minimal_no-stats.conf b/docker/nginx/conf/nginx_rtmp_minimal_no-stats.conf new file mode 100644 index 0000000..b1bc6b7 --- /dev/null +++ b/docker/nginx/conf/nginx_rtmp_minimal_no-stats.conf @@ -0,0 +1,14 @@ +worker_processes auto; +rtmp_auto_push on; +events {} +rtmp { + server { + listen 1935; + listen [::]:1935; + + application live { + live on; + record off; + } + } +} \ No newline at end of file diff --git a/docker/nginx/players/dash.html b/docker/nginx/players/dash.html new file mode 100644 index 0000000..12b8df7 --- /dev/null +++ b/docker/nginx/players/dash.html @@ -0,0 +1,23 @@ + + + + + DASH Live Streaming + + + + +

DASH Player

+ + + + + + + diff --git a/docker/nginx/players/hls.html b/docker/nginx/players/hls.html new file mode 100644 index 0000000..15d95b4 --- /dev/null +++ b/docker/nginx/players/hls.html @@ -0,0 +1,23 @@ + + + + + HLS Live Streaming + + + + +

HLS Player

+ + + + + + + diff --git a/docker/nginx/players/hls_hlsjs.html b/docker/nginx/players/hls_hlsjs.html new file mode 100644 index 0000000..0237e7a --- /dev/null +++ b/docker/nginx/players/hls_hlsjs.html @@ -0,0 +1,41 @@ + + + + + HLS streaming + + + + + + + + + + +

HLS Player (using hls.js)

+ +
+
+ +
+
+ + + + + + + diff --git a/docker/nginx/players/rtmp.html b/docker/nginx/players/rtmp.html new file mode 100644 index 0000000..d8ce856 --- /dev/null +++ b/docker/nginx/players/rtmp.html @@ -0,0 +1,24 @@ + + + + + RTMP Live Streaming + Live Streaming + + + + + + + +

RTMP Player

+ + + + + diff --git a/docker/nginx/players/rtmp_hls.html b/docker/nginx/players/rtmp_hls.html new file mode 100644 index 0000000..35617e9 --- /dev/null +++ b/docker/nginx/players/rtmp_hls.html @@ -0,0 +1,30 @@ + + + + + Live Streaming + + + + + + + + +

RTMP Player

+ + +

HLS Player

+
+
+
+
+
diff --git a/docker/nginx/rtmp-alpine.yaml b/docker/nginx/rtmp-alpine.yaml
new file mode 100644
index 0000000..353fb82
--- /dev/null
+++ b/docker/nginx/rtmp-alpine.yaml
@@ -0,0 +1,14 @@
+# docker run -d -p 1935:1935 -p 8080:8080 -v custom.conf:/etc/nginx/nginx.conf alqutami/rtmp-hls:latest-alpine
+
+# docker-compose -f rtmp-alpine.yaml up
+version: "3.5"
+
+services:
+  nginx-rtmp:
+    image: alqutami/rtmp-hls:latest-alpine
+    restart: always
+    ports:
+      - "1935:1935"
+      - "8080:8080"
+    volumes:
+      - "./conf/nginx_no-ffmpeg.conf:/etc/nginx/nginx.conf"
diff --git a/docker/python/3.8-ubuntu20.04.Dockerfile b/docker/python/3.8-ubuntu20.04.Dockerfile
new file mode 100644
index 0000000..ec7aa20
--- /dev/null
+++ b/docker/python/3.8-ubuntu20.04.Dockerfile
@@ -0,0 +1,27 @@
+# Dockerfile for a slim Ubuntu 20.04-based Python 3.8 image
+# docker build -t rustlekarl/python:3.8-ubuntu20.04 -t rustlekarl/python:latest -f docker/python/3.8-ubuntu20.04.Dockerfile .
+FROM ubuntu:20.04
+
+MAINTAINER rustlekarl "rustlekarl@gmail.com"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list
+RUN echo "[global]\nindex-url=http://mirrors.aliyun.com/pypi/simple/\n[install]\ntrusted-host=mirrors.aliyun.com" > /etc/pip.conf
+
+RUN apt-get update \
+    && apt-get install -y python3-pip python3-dev \
+    && cd /usr/local/bin \
+    && ln -s /usr/bin/python3 python \
+    && ln -s /usr/bin/pip3 pip \
+    && pip3 --no-cache-dir install --upgrade pip
+
+RUN rm -rf /var/lib/apt/lists/* && apt-get -y purge
+
+# drop you into a shell to look around
+# modify as needed for actual use
+RUN echo "#!/bin/bash\n/bin/bash" > /entrypoint.sh
+
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docs/ffplay.md b/docs/ffplay.md
new file mode 100644
index 0000000..e800271
--- /dev/null
+++ b/docs/ffplay.md
@@ -0,0 +1,190 @@
+
+
+# Common FFplay Commands
+
+## Playback Controls
+
+| Key | Description |
+| ------------ | ------------ |
+| q, ESC | quit |
+| f | toggle full screen |
+| p, SPC | pause |
+| m | mute |
+| 9, 0 | 9 decreases the volume, 0 increases it |
+| a | cycle audio streams |
+| v | cycle video streams |
+| t | cycle subtitle streams |
+| c | cycle programs |
+| w | cycle video filters or show modes |
+| s | step to the next frame |
+| left/right | seek backward/forward 10 seconds |
+| down/up | seek backward/forward 1 minute |
+| right mouse click | seek to the percentage of the file corresponding to the fraction of the window width |
+| left mouse double-click | toggle full screen |
+
+## Command-Line Options
+
+| Main option | Description |
+| -------------- | -------------- |
+| -x width | force the displayed width |
+| -y height | force the displayed height |
+| -video_size | set the frame size (WxH) for video that does not carry a frame size itself, such as raw YUV; errors out if the device does not support the resolution |
+| -pixel_format | set the pixel format |
+| -fs | start in full-screen mode |
+| -an | disable audio (no sound) |
+| -vn | disable video (no picture) |
+| -sn | disable subtitles |
+| -ss pos | seek to the given position in seconds |
+| -t duration | set the duration of audio/video to play |
+| -bytes | seek by bytes (0=off 1=on -1=auto) |
+| -seek_interval | custom left/right-key seek interval in seconds, default 10 |
+| -nodisp | disable the graphical display window |
+| -noborder | borderless window |
+| -volume | set the start volume, range [0, 100] |
+| -f | force the input format, e.g. `-f s16le` |
+| -window_title | set the window title (default: the input filename) |
+| -loop | set the number of playback loops |
+| -showmode | set the show mode: 0 video, 1 audio waveform, 2 audio spectrum; default 0, automatically falls back to 2 if there is no video |
+| -vf | set video filters |
+| -af | set audio filters |
+
+## Advanced Options
+
+| Option | Description |
+| -------------- | -------------- |
+| -stats | print several playback statistics: stream duration, codec parameters, current position in the stream, and the audio/video sync drift; enabled by default, disable with -nostats |
+| -fast | non-spec-compliant multimedia compatibility optimizations |
+| -genpts | generate pts |
+| -sync | sync type: set the master clock to audio, video, or external; default audio |
+| -ast audio_stream_specifier | select the audio stream index, e.g. -ast 3 plays the audio stream with index 3 |
+| -vst video_stream_specifier | select the video stream index |
+| -sst subtitle_stream_specifier | select the subtitle stream index |
+| -autoexit | exit when playback finishes |
+| -exitonkeydown | exit on any key press |
+| -exitonmousedown | exit on any mouse button press |
+| -codec:media_specifier | force a decoder for a(udio), v(ideo) or s(ubtitles), e.g. -codec:v h264_qsv |
+| -acodec | force an audio decoder |
+| -vcodec | force a video decoder |
+| -scodec | force a subtitle decoder |
+| -autorotate | automatically rotate the video according to file metadata; 0 or 1, default 1 |
+| -framedrop | drop video frames when video is out of sync; enabled by default when the master clock is not the video clock, disable with -noframedrop |
+| -infbuf | do not limit the input buffer size, read as much data as possible from the input as soon as possible |
+
+## Filters
+
+> Complex filtergraphs do not seem to be supported.
+
+| Example | Command |
+| -------------- | -------------- |
+| rotate the video | ffplay -i test.mp4 -vf transpose=1 |
+| flip the video | ffplay test.mp4 -vf hflip, ffplay test.mp4 -vf vflip |
+| rotate and flip | ffplay test.mp4 -vf hflip,transpose=1 |
+| play audio faster | ffplay -i test.mp4 -af atempo=2 |
+| play video faster | ffplay -i test.mp4 -vf setpts=PTS/2 |
+| play audio and video faster together | ffplay -i test.mp4 -vf setpts=PTS/2 -af atempo=2 |
+
+## Code Example
+
+```python
+from ffmpeg import ffplay_video
+from tests import data
+
+ffplay_video(data.V1, vf='transpose=1')
+ffplay_video(data.V1, vf='hflip')
+ffplay_video(data.V1, af='atempo=2')
+ffplay_video(data.V1, vf='setpts=PTS/2')
+ffplay_video(data.V1, vf='transpose=1,setpts=PTS/2', af='atempo=2')
+```
+
+## Command-Line Examples
+
+### Play an audio file
+
+```shell
+ffplay audio.aac
+```
+
+A window pops up that plays the audio while drawing its spectrogram. The window reacts as follows: clicking anywhere converts the click position into a time offset and playback jumps there; the right arrow key seeks forward 10 s and the left arrow back 10 s; up seeks forward 1 min and down back 1 min; ESC quits the player; pressing w switches to, for example, the waveform display.
+
+### Play a video file
+
+```shell
+ffplay video.mp4
+```
+
+The video plays in a newly opened window. To play several files at the same time, simply run ffplay from several terminals. Pressing s enters frame-step mode: each further press of s advances one frame.
+
+### Play 10 seconds starting at second 30
+
+```shell
+# play 10 seconds starting at second 30
+ffplay -ss 30 -t 10 long.mp4
+```
+
+### Loop playback
+
+```shell
+ffplay video.mp4 -loop 10
+```
+
+### Play the audio stream with index 1
+
+```shell
+ffplay video.mkv -ast 1
+```
+
+### Play the video stream with index 1
+
+```shell
+ffplay video.mkv -vst 1
+```
+
+## Playing Raw Data
+
+### Play PCM audio
+
+```shell
+ffplay song.pcm -f s16le -channels 2 -ar 44100
+```
+
+The format (-f), channel count (-channels), and sample rate (-ar) must all be set correctly; if any of them is wrong, playback will not sound right.
+
+WAV is called an uncompressed format: it is simply PCM with a 44-byte header that records the sample format, channel count, sample rate, and so on. ffplay can play a WAV file directly, but to play raw PCM it just needs those three pieces of information supplied.
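+As a quick sanity check (a minimal standalone sketch, not part of this project), the playback duration of a raw PCM file follows directly from these three parameters, which is why a wrong value distorts the result:
+
+```python
+import os
+
+def pcm_duration(path: str, sample_rate: int = 44100, channels: int = 2,
+                 bytes_per_sample: int = 2) -> float:
+    """Seconds of audio in a raw PCM file (s16le -> 2 bytes per sample)."""
+    bytes_per_second = sample_rate * channels * bytes_per_sample
+    return os.path.getsize(path) / bytes_per_second
+```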
+### Play YUV420P video frames
+
+```shell
+ffplay -f rawvideo -pixel_format yuv420p -s 480x480 texture.yuv
+```
+
+Required: the format (-f rawvideo means raw data), the pixel format (-pixel_format yuv420p), and the frame size (-s 480x480).
+
+### Play raw RGB data
+
+```shell
+ffplay -f rawvideo -pixel_format rgb24 -s 480x480 texture.rgb
+```
+
+## Audio/Video Synchronization
+
+FFplay implements A/V sync in three ways: with the audio clock as the master sync source, with the video clock as the master, or with an external clock as the master. By default ffplay aligns to the audio clock.
+
+Every video or audio frame a player receives carries an internal timestamp (the PTS clock) that marks when it should actually be presented. The alignment strategy is: compare the current video playback time with the current audio playback time; if video plays too fast, slow it down by increasing the delay or repeating frames; if video plays too slow, catch up with the audio by decreasing the delay or dropping frames. The crux is comparing the audio and video times and computing the delay; during the comparison a threshold is used, and once the drift exceeds it an adjustment is made (dropping a frame or rendering one twice).
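+The decision rule can be sketched as follows (a simplified Python sketch of the strategy described above, not ffplay's actual code; the threshold value is purely illustrative):
+
+```python
+SYNC_THRESHOLD = 0.1  # tolerated drift in seconds; illustrative value only
+
+def sync_action(video_clock: float, master_clock: float) -> str:
+    """Decide how to adjust video rendering against the master (audio) clock."""
+    drift = video_clock - master_clock
+    if drift > SYNC_THRESHOLD:   # video runs ahead: slow it down
+        return "increase delay / repeat the frame"
+    if drift < -SYNC_THRESHOLD:  # video lags behind: catch up
+        return "decrease delay / drop the frame"
+    return "render normally"
+```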
+### Specifying the sync source
+
+```shell
+# audio as the master clock
+ffplay 32037.mp4 -sync audio
+
+# video as the master clock
+ffplay 32037.mp4 -sync video
+
+# an external clock as the master clock
+ffplay 32037.mp4 -sync ext
+```
diff --git a/docs/vfilters.md b/docs/vfilters.md
new file mode 100644
index 0000000..28da7a2
--- /dev/null
+++ b/docs/vfilters.md
@@ -0,0 +1,7898 @@
+
+
+FFmpeg ships with roughly 450 audio and video media filters, covering most of what video processing needs. But since nobody can know all of them inside out — most of them I had never even seen — it is easy to be stuck on a requirement without realizing a filter already solves it, so it is worth trying every FFmpeg filter once. Hence this series, which aims to give an example and a final before/after comparison video or image for every filter, though it may well die halfway.
+
+All FFmpeg commands in this series are generated by FFmpeg-Generator, both for reusability and for ease of writing — memorizing raw commands is too much to ask of ordinary people. The project is still being tested and has not been open-sourced yet.
+
+FFmpeg filter parameters can be positional, i.e. only values without parameter names, in which case their order must not change; or keyword parameters, given as key=value pairs, in which case the position is free.
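+For example, the following two calls (using ffplay_video exactly as elsewhere in these notes; the scale values are arbitrary) apply the same filter once positionally and once with keywords:
+
+```python
+from ffmpeg import ffplay_video
+from tests import data
+
+# positional form: values only, the order (width, then height) is fixed
+ffplay_video(data.V1, vf='scale=1280:720')
+
+# keyword form: key=value pairs, the order does not matter
+ffplay_video(data.V1, vf='scale=h=720:w=1280')
+```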
+The syntax rules for color, size, time and other expressions are described at:
+
+> https://ffmpeg.org/ffmpeg-utils.html
+
+Filters that are genuinely never needed, or whose purpose I do not yet understand, are omitted for now and will be added later if I ever need them.
+
+```python
+from ffmpeg import input, merge_outputs, vfilters, vtools
+```
+
+## addroi
+
+> https://ffmpeg.org/ffmpeg-filters.html#addroi
+
+ROI stands for region of interest. In machine vision and image processing, a region outlined on the processed image with a box, circle, ellipse, irregular polygon, etc., is called a region of interest.
+
+This filter marks one or more regions of the video as regions of interest. The frame data itself is unchanged; the ROI information is only added to the frame metadata and influences the later encoding process. Apply the filter several times to mark several regions.
+
+### Parameters
+
+- x distance in pixels from the left edge of the frame
+- y distance in pixels from the top edge of the frame
+- w region width in pixels; iw means the input frame width
+- h region height in pixels; ih means the input frame height
+- qoffset encoding-quality offset for the marked region, between -1 and 1; negative values mean higher quality, 0 keeps the quality, positive values mean lower quality (think of it as "not interested")
+- clear if set to true, remove all existing region marks first
+
+### Example - mark the center region of the video
+
+The worst quality is used so the difference is obvious.
+
+```python
+_ = input(src).addroi(x="iw/4", y="ih/4", w="iw/2", h="ih/2", qoffset=1, clear=1).output(dst).run()
+```
+
+> This code generates and runs the command below, then reports the running time.
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]addroi=clear=1:h=ih/2:qoffset=1:w=iw/2:x=iw/4:y=ih/4[tag0]" -map [tag0] testdata\media\v0_addroi.mp4 -y -hide_banner
+[0.5302s]
+```
+
+#### Comparison
+
+The source video is 221 KB, the output 202 KB. Comparison video (source on the left by default):
+
+```python
+vtools.compare_2_videos(src, dst, src_dst_compare)
+```
+
+> The comparison code is the same throughout, so it is only shown this once.
+
+```
+ffmpeg -i testdata\media\0.mp4 -i testdata\media\v0_addroi.mp4 -filter_complex "[0]pad=w=2*iw[tag0];[tag0][1]overlay=x=w[tag1]" -map [tag1] testdata\media\v0_addroi_compare.mp4 -y -hide_banner
+[1.0771s]
+```
+
+https://www.bilibili.com/video/BV1iv411h7Eq/
+
+## alphaextract
+
+> https://ffmpeg.org/ffmpeg-filters.html#alphaextract
+
+Extracts the alpha component of the input as a grayscale video. Particularly useful together with the `alphamerge` filter.
+
+### Parameters
+
+None.
+
+### Example
+
+First make sure the input video has an alpha channel, otherwise the filter errors out. That is, it must be in an RGBA-style format or the extraction fails. RGBA adds an alpha component on top of plain RGB; that component represents the transparency of the image.
+
+Unfortunately I could not find a video in a suitable format, so a PNG image is used for the test instead:
+
+```python
+_ = input(src).alphaextract().output(dst).run()
+```
+
+```
+ffmpeg -i testdata\i2.png -filter_complex "[0]alphaextract[tag0]" -map [tag0] testdata\i2_alphaextract.png -y -hide_banner
+[0.1415s]
+```
+
+#### Comparison
+
+![](https://i.loli.net/2021/03/10/piTZjoF253lG8Dg.png)
+
+## alphamerge
+
+> https://ffmpeg.org/ffmpeg-filters.html#alphamerge
+
+Adds or replaces the alpha component of the first input with the grayscale values of the second input. Can be used together with `alphaextract` to transfer or store frame sequences that have alpha but are kept in a format without an alpha channel.
+
+### Parameters
+
+None.
+
+### Example
+
+```python
+_ = input(src).movie(filename=i2_alpha.as_posix()).alphamerge().output(dst).run()
+```
+
+```
+ffmpeg -i testdata\i3.png -filter_complex "movie=filename=testdata/i2_alpha.png[tag0];[0][tag0]alphamerge[tag1]" -map [tag1] testdata\i3_alphamerge.png -y -hide_banner
+```
+
+An equivalent way to write it:
+
+```python
+_ = input(src).alphamerge(input(i2_alpha)).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\i3.png -i testdata\i2_alpha.png -filter_complex "[0][1]alphamerge[tag0]" -map [tag0] testdata\i3_alphamerge.png -y -hide_banner
+[0.1514s]
+```
+
+#### Comparison
+
+Still tested with images for lack of usable video material; the black areas are transparent.
+
+![](https://i.loli.net/2021/03/11/5xqBnRTLoVUfzGZ.png)
+
+## amplify
+
+> https://ffmpeg.org/ffmpeg-filters.html#amplify
+
+Amplifies the differences between the current frame and its neighbouring frames at the same pixel positions.
+
+### Parameters
+
+- radius number of neighbouring frames taken on each side, 1 to 63, default 2; e.g. 3 means 3 frames before and after plus the current one, averaging over 7 frames
+- factor difference amplification factor, 0 to 65535, default 2
+- threshold upper difference limit, 0 to 65535, default 2; pixel differences at or above it are not amplified (already large enough)
+- tolerance lower difference limit, 0 to 65535, default 0; pixel differences below it are not amplified (too small)
+- low lower limit for changing a source pixel, 0 to 65535, default 65535; the largest possible decrease of a source pixel value
+- high upper limit for changing a source pixel, 0 to 65535, default 65535; the largest possible increase of a source pixel value
+- planes planes to process, default all, range 0 to 15
+
+### Example
+
+```python
+_ = input(src).amplify(radius=3, factor=10, threshold=50).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]amplify=factor=10:radius=3:threshold=50[tag0]" -map [tag0] testdata\media\v0_amplify.mp4 -y -hide_banner
+[1.0090s]
+```
+
+#### Comparison
+
+https://www.bilibili.com/video/BV1V54y1a7Um
+
+## ass
+
+> https://ffmpeg.org/ffmpeg-filters.html#ass
+
+Renders ASS-format subtitles. Parameters and usage are essentially the same as for the subtitles filter.
+
+### Parameters
+
+- shaping the shaping engine:
+  - auto the default `libass` engine
+  - simple the font-agnostic engine
+  - complex the `OpenType` engine
+
+The remaining parameters are the same as for the subtitles filter.
+
+### Example
+
+The subtitle file was made by hand with [Aegisub](https://aegi.vmoe.info/downloads/).
+
+```python
+_ = input(src).ass(filename=media_v0_ass.as_posix()).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]ass=filename=testdata/media/0.ass[tag0]" -map [tag0] testdata\media\v0_ass.mp4 -y -hide_banner
+[0.6707s]
+```
+
+#### Comparison
+
+https://www.bilibili.com/video/BV1Fv411a7L1/
+
+## atadenoise
+
+> https://ffmpeg.org/ffmpeg-filters.html#atadenoise
+
+Adaptive Temporal Averaging Denoiser.
+
+### Parameters
+
+- 0a threshold A for the first plane, 0 to 0.3, default 0.02
+- 0b threshold B for the first plane, 0 to 5, default 0.04
+- 1a likewise for the next plane
+- 1b likewise
+- 2a likewise
+- 2b likewise
+- s number of frames used for averaging, an odd number from 5 to 129, default 9
+- p planes to filter, default all
+- a variant of the algorithm used for averaging: default p (parallel), can also be set to s (serial); parallel can be faster than serial; parallel aborts early as soon as a difference exceeds the threshold, while serial continues processing the other side of the frame when the difference is at or below the threshold
+- 0s likewise (see below)
+- 1s likewise
+- 2s sigma for the plane, default 32767, range 0 to 32767; controls the weight of each pixel within the radius defined by the size; the default gives every pixel the same weight; 0 effectively disables filtering
+
+### Example
+
+```python
+_ = input(src).atadenoise(s=25).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]atadenoise=s=25[tag0]" -map [tag0] testdata\media\v0_atadenoise.mp4 -y -hide_banner
+[1.5373s]
+```
+
+#### Comparison
+
+No difference visible to the naked eye:
+
+[video comparison link]
+
+## avgblur
+
+> https://ffmpeg.org/ffmpeg-filters.html#avgblur
+
+Average blur filter.
+
+### Parameters
+
+- sizeX horizontal radius
+- sizeY vertical radius
+- planes planes to filter, default all
+
+### Example
+
+```python
+_ = input(src).avgblur(x=10, y=10).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]avgblur=sizeX=10:sizeY=10[tag0]" -map [tag0] testdata\media\v0_avgblur.mp4 -y -hide_banner
+[0.7048s]
+```
+
+#### Comparison
+
+[video comparison link]
+
+## bbox
+
+> https://ffmpeg.org/ffmpeg-filters.html#bbox
+
+Computes the bounding box of the non-black pixels in the luma plane of the input frames.
+
+The filter computes the bounding box containing all pixels whose luma is greater than the minimum allowed value. The parameters describing the box are printed to the filter log; the output video is unaffected.
+
+### Parameters
+
+- min_val minimum luma value, default 16
+
+### Example
+
+```python
+_ = input(src).bbox(min_val=100).output(dst).run(capture_stderr=False, capture_stdout=False)
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]bbox=min_val=100[tag0]" -map [tag0] testdata\media\v0_bbox.mp4 -y -hide_banner
+[0.9152s]
+```
+
+#### Comparison
+
+Only the bounding-box parameters are printed; the output video is unaffected.
+
+## bilateral
+
+> https://ffmpeg.org/ffmpeg-filters.html#bilateral
+
+Bilateral filter: spatial smoothing while preserving edges.
+
+### Parameters
+
+- sigmaS sigma of the Gaussian used to compute the spatial weights, 0 to 512, default 0.1
+- sigmaR sigma of the Gaussian used to compute the range weights, 0 to 1, default 0.1
+- planes planes to filter, default the first plane
+
+### Example
+
+```python
+_ = input(src).bilateral(s=12, r=0.3).output(dst).run()
+```
+
+```
+ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]bilateral=sigmaR=0.3:sigmaS=12[tag0]" -map [tag0] testdata\media\v0_bilateral.mp4 -y -hide_banner
+[1.6872s]
+```
+
+#### Comparison
+
+[video comparison link]
testdata\media\v0_bilateral.mp4 -y -hide_banner +[1.6872s] +``` + +#### 对比 + +[视频对比链接] + + +## 10 bitplanenoise + +> https://ffmpeg.org/ffmpeg-filters.html#bitplanenoise + +显示和测量位通道噪声。 + +### 参数 + +- bitplane 指定分析通道,默认 1 +- filter 从上面设置的位通道中滤除噪点像素,默认设置为禁用。 + +### 示例 + +```python +_ = input(src).bitplanenoise(filter=True).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]bitplanenoise=filter=True[tag0]" -map [tag0] testdata\media\v0_bitplanenoise.mp4 -y -hide_banner +[0.8679s] +``` + +#### 对比 + +[视频对比链接] + +## blackdetect + +> https://ffmpeg.org/ffmpeg-filters.html#blackdetect + +检测视频中全黑的段落。 + +过滤器将其检测分析以及帧元数据输出到日志。 如果找到至少指定了最小持续时间的黑色段,则会将带有开始和结束时间戳记以及持续时间的行打印到带有级别信息的日志中。 另外,每帧打印一条带调试级别的日志行,显示该帧检测到的黑色量。 + +过滤器还将键为 lavfi.black_start 的元数据附加到黑色段的第一帧,并将键为 lavfi.black_end 的元数据附加到黑色段结束后的第一帧。该值是帧的时间戳。无论指定的最短持续时间如何,都会添加此元数据。 + +### 参数 + +- black_min_duration, d 设置检测到的最小黑屏持续时间(以秒为单位),必须是非负浮点数,默认 2.0 +- picture_black_ratio_th, pic_th 设置用于考虑图片“黑色”的阈值,表示比率的最小值:`nb_black_pixels / nb_pixels`,超过该阈值即认为是黑屏,默认 0.98 +- pixel_black_th, pix_th 设置用于考虑像素“黑色”的阈值。阈值表示将像素视为“黑色”的最大像素亮度值。 提供的值根据以下公式缩放:`absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size`,`luminance_range_size和luminance_minimum_value` 取决于输入的视频格式,YUV 全范围格式的范围是[0-255],YUV 非全范围格式的范围是[16-235]。默认 0.10 + +### 示例 + +```python +_ = input(src).blackdetect(d=2, pix_th=0.00).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]blackdetect=d=2:pix_th=0.0[tag0]" -map [tag0] testdata\media\v0_blackdetect.mp4 -y -hide_banner +[0.3799s] +``` + +#### 对比 + +只是打印了信息和写入了元数据,输出视频与原视频肉眼看无差别。 + +## blackframe + +> https://ffmpeg.org/ffmpeg-filters.html#blackframe + +检测全黑的帧。 输出行包括检测到的帧的帧号,黑度百分比,文件中的位置或 -1 以及以秒为单位的时间戳。 + +为了显示输出行,至少将日志级别设置为 AV_LOG_INFO 值。 + +此过滤器导出帧元数据 lavfi.blackframe.pblack。该值表示图片中低于阈值的像素百分比。 + +### 参数 + +- amount 必须低于阈值的像素百分比;默认为 98 +- threshold 阈值,低于该阈值将被视为黑色;默认为 32 + + +### 示例 + +```python +_ = input(src).blackframe(amount=95, threshold=24).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]blackframe=amount=95:threshold=24[tag0]" -map [tag0] testdata\media\v0_blackframe.mp4 -y -hide_banner +[0.8265s] +``` + +#### 对比 + +只是打印了信息和写入了元数据,输出视频与原视频肉眼看无差别。 + +## blend + +> https://ffmpeg.org/ffmpeg-filters.html#blend + +将两个视频帧混合在一起。输入两个视频流,第一个输入作为上层帧,第二个输入作为底层帧,控制底层和上层帧显示的权重。 + +`tblend` 从单个流中获取两个连续的帧,并输出将新帧混合到旧帧上获得的结果。 + +### 参数 + +- c0_mode +- c1_mode +- c2_mode +- c3_mode +- all_mode 为特定的像素分量或所有像素分量设置混合模式。默认 normal。有效的模式有: + + - ‘addition’ + - ‘grainmerge’ + - ‘and’ + - ‘average’ + - ‘burn’ + - ‘darken’ + - ‘difference’ + - ‘grainextract’ + - ‘divide’ + - ‘dodge’ + - ‘freeze’ + - ‘exclusion’ + - ‘extremity’ + - ‘glow’ + - ‘hardlight’ + - ‘hardmix’ + - ‘heat’ + - ‘lighten’ + - ‘linearlight’ + - ‘multiply’ + - ‘multiply128’ + - ‘negation’ + - ‘normal’ + - ‘or’ + - ‘overlay’ + - ‘phoenix’ + - ‘pinlight’ + - ‘reflect’ + - ‘screen’ + - ‘softlight’ + - ‘subtract’ + - ‘vividlight’ + - ‘xor’ + +- c0_opacity +- c1_opacity +- c2_opacity +- c3_opacity +- all_opacity 设置特定像素组件或所有像素组件的混合不透明度。 仅与像素成分混合模式结合使用。 + +- c0_expr +- c1_expr +- c2_expr +- c3_expr +- all_expr 设置特定像素分量或所有像素分量的混合表达式。 如果设置了相关的模式选项,则将忽略。表示式可用变量: + + - N 过滤后的帧的序号,从 0 开始 + - X + - Y 当前样本的坐标 + - W + - H 当前过滤通道的宽度和高度 + - SW + - SH 被过滤通道的宽度和高度比例。 它是当前通道尺寸与亮度通道尺寸之间的比率,例如 对于 yuv420p 帧,亮度通道的值为 1,1,色度通道的值为 0.5,0.5 + - T 当前帧的时间,以秒为单位 + - TOP 第一个视频帧(顶层)当前位置的像素分量值 + - BOTTOM 第二个视频帧(底层)当前位置的像素分量值 + +`blend` 支持 `framesync` 参数。 + +### 示例 + +### 在 4 秒内从底层过渡到顶层 + +```python +_ = 
vfilters.blend(in1, in2, all_expr='A*(if(gte(T,4),1,T/4))+B*(1-(if(gte(T,4),1,T/4)))').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=A*(if(gte(T\,4)\,1\,T/4))+B*(1-(if(gte(T\,4)\,1\,T/4)))[tag0]" -map [tag0] testdata\media\v0_v1_blend_1.mp4 -y -hide_banner +[1.6540s] +``` + +#### 对比 + +[视频对比链接] + +### 从顶层到底层的线性水平过渡 + +```python +_ = vfilters.blend(in1, in2, all_expr='A*(X/W)+B*(1-X/W)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=A*(X/W)+B*(1-X/W)[tag0]" -map [tag0] testdata\media\v0_v1_blend_2.mp4 -y -hide_banner +[2.4407s] +``` + +#### 对比 + +[视频对比链接] + +### 1x1 棋盘格效果 + +```python +_ = vfilters.blend(in1, in2, all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=if(eq(mod(X\,2)\,mod(Y\,2))\,A\,B)[tag0]" -map [tag0] testdata\media\v0_v1_blend_3.mp4 -y -hide_banner +[0.8931s] +``` + +#### 对比 + +[视频对比链接] + +### 从右边揭开效果 + +```python +_ = vfilters.blend(in1, in2, all_expr='if(gte(N*SW+X*T,W),A,B)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=if(gte(N*SW+X*T\,W)\,A\,B)[tag0]" -map [tag0] testdata\media\v0_v1_blend_4.mp4 -y -hide_banner +[2.3491s] +``` + +#### 对比 + +[视频对比链接] + +### 从上边揭开效果 + +```python +_ = vfilters.blend(in1, in2, all_expr='if(gte(Y-N*SH*T,0),A,B)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=if(gte(Y-N*SH*T\,0)\,A\,B)[tag0]" -map [tag0] testdata\media\v0_v1_blend_5.mp4 -y -hide_banner +[0.9813s] +``` + +#### 对比 + +[视频对比链接] + +### 从右下角揭开效果 + +```python +_ = vfilters.blend(in1, in2, all_expr='if(gte(T*SH*40+Y*T,H)*gte((T*40*SW+X)*W/H,W),A,B)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=if(gte(T*SH*40+Y*T\,H)*gte((T*40*SW+X)*W/H\,W)\,A\,B)[tag0]" -map [tag0] testdata\media\v0_v1_blend_6.mp4 -y -hide_banner +[1.8537s] +``` + +#### 对比 + +[视频对比链接] + +### 对角显示 + +```python +_ = vfilters.blend(in1, in2, all_expr='if(gt(X,Y*(W/H)),A,B)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -i testdata\media\1.mp4 -filter_complex "[0][1]blend=all_expr=if(gt(X\,Y*(W/H))\,A\,B)[tag0]" -map [tag0] testdata\media\v0_blend.mp4 -y -hide_banner +[0.8858s] +``` + +#### 对比 + +[视频对比链接] + +### 显示当前帧和上一帧之间的差异 + +```python +_ = in1.tblend(all_mode="grainextract").output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]tblend=all_mode=grainextract[tag0]" -map [tag0] testdata\media\v0_v1_blend_8.mp4 -y -hide_banner +[0.4555s] +``` + +#### 对比 + +[视频对比链接] + +## bm3d + +> https://ffmpeg.org/ffmpeg-filters.html#bm3d + +用块匹配 3D 算法(Block-Matching 3D algorithm)对帧进行消噪。 + +### 参数 + +- sigma 设置降噪强度。 默认值为 1。允许的范围是 0 到 999.9。去噪算法对 sigma 非常敏感,因此请根据信号源进行调整。 +- block 设置本地补丁大小。 这将以 2D 设置尺寸。 +- bstep 设置处理块的滑动步长。 默认值为 4。允许的范围是 1 到 64。较小的值允许处理更多的参考块,并且速度较慢。 +- group 设置第 3 维相似块的最大数量。默认值为 1。设置为 1 时,不执行块匹配。较大的值可以在一个组中包含更多块。允许范围是 1 到 256。 +- range 设置搜索块匹配的半径。 默认值为 9。允许的范围是 1 到 INT32_MAX。 +- mstep 在两个搜索位置之间设置步长以进行块匹配。 默认值为 1。允许的范围是 1 到 64。越小越慢。 +- thmse 设置块匹配的均方误差阈值。 有效范围是 0 到 INT32_MAX。 +- hdthr 为 3D 转换域中的硬阈值设置阈值参数。 较大的值将导致频域中的更强硬阈值滤波。 +- estim 设置过滤评估模式。 可以是 basic 或 final。 默认为 basic。 +- ref 如果启用,过滤器将使用第二流进行块匹配。 estim 为 basic 则默认禁用,如果 estim 是 final,则始终默认启用。 +- planes 设置过滤的通道。 默认值是除 alpha 
以外的所有通道。 + +### 示例 + +```python +_ = input(src).bm3d(sigma=3, block=4, bstep=2, group=1, estim="basic").output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]bm3d=block=4:bstep=2:estim=basic:group=1:sigma=3[tag0]" -map [tag0] testdata\media\v0_bm3d.mp4 -y -hide_banner +[9.0762s] +``` + +#### 对比 + +差别不大。 + +[视频对比链接] + +## boxblur + +> https://ffmpeg.org/ffmpeg-filters.html#boxblur + +boxblur 算法滤镜。 + +### 参数 + +- luma_radius, lr +- chroma_radius, cr +- alpha_radius, ar 用于为框半径设置一个表达式,以像素为单位来模糊相应的输入通道。半径值必须为非负数,并且对于亮度和 alpha 通道,不得大于表达式 min(w,h)/2 的值,对于色度,不得大于 min(cw,ch)/2 的值。luma_radius 的默认值为 “2”。如果未指定,则 chroma_radius 和 alpha_radius 默认为 luma_radius 设置的相应值。可用的常量有: + - w + - h 输入视频的像素宽和高 + - cw + - ch 输入色度图像的像素宽和高 + - hsub + - vsub 水平和垂直色度子样本值。 例如,对于像素格式 “yuv422p”,hsub 为 2,vsub 为 1 + +- luma_power, lp +- chroma_power, cp +- alpha_power, ap 指定将 boxblur 滤镜应用到相应通道的次数。luma_power 的默认值为 2。如果未指定,则 chroma_power 和 alpha_power 默认为 luma_power 设置的相应值。值为 0 将禁用效果。 + +### 示例 + +```python +_ = input(src).boxblur(luma_radius="min(h,w)/10", luma_power=1, + chroma_radius="min(cw,ch)/10", chroma_power=1). \ + output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]boxblur=chroma_power=1:chroma_radius=min(cw\,ch)/10:luma_power=1:luma_radius=min(h\,w)/10[tag0]" -map [tag0] testdata\media\v0_boxblur.mp4 -y -hide_banner +[0.7788s] +``` + +#### 对比 + +[视频对比链接] + +## bwdif + +> https://ffmpeg.org/ffmpeg-filters.html#bwdif + +反交错滤波器 Bob Weaver Deinterlacing Filter。基于 yadif 运动自适应反交错,使用 w3fdif 和三次插值算法。 + +### 参数 + +- mode 交错扫描模式 + - 0, send_frame 每帧输出一帧 + - 1, send_field 每个场输出一帧,默认 +- parity 假定输入的交错视频进行了图像场奇偶校验 + - 0, tff 假定顶部场优先 + - 1, bff 假定底部场优先 + - -1, auto 自动检测场奇偶校验,默认,如果隔行扫描是未知的,或者解码器未导出此信息,则将假定顶场优先。 +- deint 指定反交错的帧 + - 0, all 默认,全部 + - 1, interlaced 仅反交错帧标记为隔行扫描 + +### 示例 + +```python +_ = input(src).bwdif(mode=0, parity=0, deint=0).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]bwdif=deint=0:mode=0:parity=0[tag0]" -map [tag0] testdata\media\v0_bwdif.mp4 -y -hide_banner +[1.5152s] +``` + +#### 对比 + +肉眼看不出差别。 + +## cas + +> https://ffmpeg.org/ffmpeg-filters.html#cas + +对视频流应用对比度自适应锐化滤波器(Contrast Adaptive Sharpen)。 + +### 参数 + +- strength 设置锐化强度。 0~1,预设值为0。 +- planes 设置通道。 + +### 示例 + +```python +_ = input(src).cas(strength=1).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]cas=strength=1[tag0]" -map [tag0] testdata\media\v0_cas.mp4 -y -hide_banner +[2.2220s] +``` + +#### 对比 + +[视频对比链接] + +## chromahold + +> https://ffmpeg.org/ffmpeg-filters.html#chromahold + +删除除某些颜色以外的所有颜色的所有颜色信息。 + +### 参数 + +- color 不会被中性色度取代的颜色。 +- similarity 与上述颜色的相似度百分比。 0.01 仅匹配确切的键色,而 1.0 匹配所有键色。 +- blend 混合百分比。 0.0 使像素要么全灰,要么根本不灰。 值越高,保留的颜色越多。 +- yuv 表示通过的颜色已经是 YUV 而不是 RGB。启用此功能后,文字颜色(如“green”或“red”)不再有意义。 这可以用来传递确切的 YUV 值作为十六进制数。 + +### 示例 + +```python +_ = input(src).chromahold(color="white", similarity=0.03).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]chromahold=color=white:similarity=0.03[tag0]" -map [tag0] testdata\media\v0_chromahold.mp4 -y -hide_banner +[1.6571s] +``` + +#### 对比 + +[视频对比链接] + +## chromakey + +> https://ffmpeg.org/ffmpeg-filters.html#chromakey + +YUV 颜色空间颜色/色度键控。用途之一就是从绿幕背景视频中提取人物合成到其他视频中。 + +### 参数 + +- color 将被透明取代的颜色。 +- similarity 与上述颜色的相似度百分比。 0.01 仅匹配确切的键色,而 1.0 匹配所有键色。 +- blend 混合百分比。 0.0 使像素要么全透明,要么根本不透明。 较高的值会导致半透明像素,像素的颜色越接近关键颜色的透明度越高。 +- yuv 表示通过的颜色已经是 YUV 而不是 RGB。启用此功能后,文字颜色(如“green”或“red”)不再有意义。 这可以用来传递确切的 YUV 值作为十六进制数。 + +### 示例 + +```python +_ = 
input(src).chromakey(color="gray", similarity=0.02).output(dst).run() +``` + +``` +ffmpeg -i testdata\i3.png -filter_complex "[0]chromakey=color=gray:similarity=0.02[tag0]" -map [tag0] testdata\media\v1_chromakey.png -y -hide_banner +``` + +#### 对比 + +[视频对比链接] + +## chromanr + +> https://ffmpeg.org/ffmpeg-filters.html#chromanr + +降低色度噪点。 + +### 参数 + +- thres 设置平均色度值的阈值。低于该阈值的当前像素和相邻像素的 Y,U 和 V 像素分量的绝对差之和将用于平均。亮度分量保持不变,并复制到输出。默认值为 30。允许的范围是 1 到 200。 +- sizew 设置用于平均的矩形的水平半径。允许范围是 1 到 100。默认值是 5。 +- sizeh 设置用于平均的矩形的垂直半径。允许范围是 1 到 100。默认值是 5。 +- stepw 平均时设置水平步长。默认值为 1。允许的范围是 1 到 50。对加速过滤很有用。 +- steph 平均时设置垂直步长。默认值为 1。允许的范围是 1 到 50。对加速过滤很有用。 +- threy 设置 Y 阈值以平均色度值。为当前像素和近邻像素的 Y 分量之间的最大允许差异设置更好的控制。默认值为 200。允许的范围是 1 到 200。 +- threu 设置 U 阈值以平均色度值。为当前像素和近邻像素的 U 分量之间的最大允许差异设置更好的控制。默认值为 200。允许的范围是 1 到 200。 +- threv 设置 V 阈值以平均色度值。为当前像素和近邻像素的 V 分量之间的最大允许差异设置更好的控制。默认值为 200。允许的范围是 1 到 200。 + +### 示例 + +```python +_ = input(src).chromanr(thres=100, sizew=20).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]chromanr=sizew=20:thres=100[tag0]" -map [tag0] testdata\media\v0_chromanr.mp4 -y -hide_banner +[1.6579s] +``` + +#### 对比 + +差别不明显。 + +## chromashift + +> https://ffmpeg.org/ffmpeg-filters.html#chromashift + +水平和/或垂直移动色度像素。 + +### 参数 + +- cbh 设置数量以水平移动蓝色色度 chroma-blue。 +- cbv 同上垂直 +- crh 设置数量以水平移动红色色度 chroma-red。 +- crv 同上垂直 +- edge 设置边缘模式:smear, default, warp + +### 示例 + +```python +_ = input(src).chromashift(cbh=100, cbv=-100, crh=100, crv=-100).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]chromashift=cbh=100:cbv=-100:crh=100:crv=-100[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_chromashift.mp4 -y -hide_banner +[1.2807s] +``` + +> 这里开启了 Cuda 加速设置,之后也是默认开启加速。 + +#### 对比 + +[视频对比链接] + +## ciescope + +> https://ffmpeg.org/ffmpeg-filters.html#ciescope + +显示覆盖像素的 CIE 彩色图表。 + +### 参数 + +- system 设置颜色系统 + - ‘ntsc, 470m’ + - ‘ebu, 470bg’ + - ‘smpte’ + - ‘240m’ + - ‘apple’ + - ‘widergb’ + - ‘cie1931’ + - ‘rec709, hdtv’ + - ‘uhdtv, rec2020’ + - ‘dcip3’ +- cie 设置 CIE 系统 + - ‘xyy’ + - ‘ucs’ + - ‘luv’ +- gamuts 设置要绘制的色域 +- size 设置 ciescope 大小,默认情况下设置为 512 +- intensity 设置用于将输入像素值映射到 CIE 图的强度 +- contrast 设置对比度以绘制超出主动色彩系统色域的颜色 +- corrgamma 正确显示范围的伽玛,默认启用 +- showwhite 显示 CIE 图上的白点,默认禁用 +- gamma 设置输入伽玛。 仅与 XYZ 输入色彩空间一起使用 + +### 示例 + +```python +_ = input(src).ciescope(size=1024, intensity=1, contrast=1).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]ciescope=contrast=1:intensity=1:size=1024[tag0]" -map [tag0] testdata\media\v0_ciescope.mp4 -y -hide_banner +[2.4198s] +``` + +> 生成的视频不支持 10 bit 编码,无法进行 Cuda 加速。 + +#### 对比 + +[视频对比链接] + +## codecview + +> https://ffmpeg.org/ffmpeg-filters.html#codecview + +可视化某些编解码器导出的信息。 + +某些编解码器可以使用边数据或其他方式通过帧导出信息。 例如,某些基于 MPEG 的编解码器通过编解码器 flags2 选项中的 export_mvs 标志导出运动矢量。 + +### 参数 + +- mv 设置运动矢量进行可视化 + - ‘pf’ P-frames 前向预测 + - ‘bf’ B-frames 前向预测 + - ‘bb’ B-frames 后向预测 +- qp 使用色度通道显示量化参数 +- mv_type 设置运动矢量类型以使其可视化。 包括所有帧的 MV,除非 frame_type 选项指定。 + - ‘fp’ 前向预测 + - ‘bp’ 后向预测 +- frame_type 设置帧类型以可视化运动矢量 + - ‘if’ I-frames + - ‘pf’ P-frames + - ‘bf’ B-frames + +### 示例 + +```python +_ = input(src).codecview(mv_type="fp").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]codecview=mv_type=fp[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_codecview.mp4 -y -hide_banner +[1.7066s] +``` + +#### 对比 + +这个命令似乎是只属于 FFplay 的,用 FFmpeg 处理后无差别。 + +```shell +ffplay -flags2 +export_mvs 
testdata\media\0.mp4 -vf codecview=mv=pf+bf+bb
+```
+
+## colorbalance
+
+> https://ffmpeg.org/ffmpeg-filters.html#colorbalance
+
+修改输入帧原色(红色,绿色和蓝色)的强度。
+
+滤镜允许在阴影,中间调或高光区域中调整输入帧,以实现红蓝,绿洋红或蓝黄色平衡。
+
+正调整值会将平衡移向原色,负调整值将移向互补色。
+
+### 参数
+
+- rs
+- gs
+- bs 调整红色,绿色和蓝色阴影(最暗的像素)
+- rm
+- gm
+- bm 调整红色,绿色和蓝色中间色调(中等像素)
+- rh
+- gh
+- bh 调整红色,绿色和蓝色高光(最亮的像素),允许的选项范围为 [-1.0, 1.0]。默认值是 0
+- pl 更改色彩平衡时保持亮度,默认设置为禁用
+
+### 示例
+
+```python
+_ = input(src).colorbalance(rs=0.3).output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorbalance=rs=0.3[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorbalance.mp4 -y -hide_banner
+[1.9180s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## colorcontrast
+
+> https://ffmpeg.org/ffmpeg-filters.html#colorcontrast
+
+调整 RGB 分量之间的颜色对比度。
+
+### 参数
+
+- rc 设置红-青对比度。默认值为 0.0。允许的范围是 -1.0 到 1.0。
+- gm 设置绿-品红对比度。默认值为 0.0。允许的范围是 -1.0 到 1.0。
+- by 设置蓝-黄对比度。默认值为 0.0。允许的范围是 -1.0 到 1.0。
+- rcw
+- gmw
+- byw 分别设置 rc、gm、by 的权重。默认值为 0.0。允许范围是 0.0 到 1.0。如果所有权重均为 0.0,则禁用过滤。
+- pl 设置保留亮度。默认值为 0.0。允许范围是 0.0 到 1.0。
+
+### 示例
+
+No such filter: 'colorcontrast'
+
+官网英文文档虽然还有这个滤镜的说明,但最新版找不到这个滤镜,Google 也找不到这个滤镜的信息,可能已经移除?
+
+## colorcorrect
+
+> https://ffmpeg.org/ffmpeg-filters.html#colorcorrect
+
+有选择地调整彩色白平衡。该滤镜在 YUV 色彩空间中运行。
+
+### 参数
+
+- rl 设置红色阴影点。允许的范围是 -1.0 到 1.0。预设值为 0。
+- bl 设置蓝色阴影点。允许的范围是 -1.0 到 1.0。预设值为 0。
+- rh 设置红色高光点。允许的范围是 -1.0 到 1.0。预设值为 0。
+- bh 设置蓝色高光点。允许的范围是 -1.0 到 1.0。预设值为 0。
+- saturation 设置饱和度。允许范围是 -3.0 到 3.0。预设值为 1。
+
+### 示例
+
+No such filter: 'colorcorrect'
+
+官网英文文档虽然还有这个滤镜的说明,但最新版找不到这个滤镜,Google 也找不到这个滤镜的信息,可能已经移除?
+
+## colorchannelmixer
+
+> https://ffmpeg.org/ffmpeg-filters.html#colorchannelmixer
+
+通过重新混合颜色通道来调整视频输入帧。
+
+该滤镜把同一像素各输入通道的加权值相加,得到新的通道值。例如,输出红色通道的值为:
+
+```
+red=red*rr + blue*rb + green*rg + alpha*ra
+```
+
+### 参数
+
+- rr
+- rg
+- rb
+- ra 调整输入红色,绿色,蓝色和 Alpha 通道对输出红色通道的贡献。rr 的默认值为 1,rg,rb 和 ra 的默认值为 0。
+- gr
+- gg
+- gb
+- ga 调整输入红色,绿色,蓝色和 Alpha 通道对输出绿色通道的贡献。gg 的默认值为 1,gr,gb 和 ga 的默认值为 0。
+- br
+- bg
+- bb
+- ba 调整输入红色,绿色,蓝色和 Alpha 通道对输出蓝色通道的贡献。bb 的默认值为 1,而 br,bg 和 ba 的默认值为 0。
+- ar
+- ag
+- ab
+- aa 调整输入红色,绿色,蓝色和 Alpha 通道对输出 Alpha 通道的贡献。aa 的默认值为 1,ar,ag 和 ab 的默认值为 0。
+
+以上参数范围:[-2.0, 2.0]
+
+- pl 更改颜色时保持亮度。允许的范围是 [0.0, 1.0]。默认值为 0.0,相当于禁用。
+
+### 示例
+
+#### 转换为灰度
+
+```python
+_ = input(src).colorchannelmixer(.3, .4, .3, 0, .3, .4, .3, 0, .3, .4, .3).output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorchannelmixer=0.3:0.4:0.3:0:0.3:0.4:0.3:0:0.3:0.4:0.3[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorchannelmixer.mp4 -y -hide_banner
+[0.4375s]
+```
+
+#### 棕褐色调
+
+```python
+_ = input(src).colorchannelmixer(.393,.769,.189,0,.349,.686,.168,0,.272,.534,.131).output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorchannelmixer=0.393:0.769:0.189:0:0.349:0.686:0.168:0:0.272:0.534:0.131[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorchannelmixer.mp4 -y -hide_banner
+[0.4482s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## colorize
+
+> https://ffmpeg.org/ffmpeg-filters.html#colorize
+
+在视频流上覆盖纯色。
+
+### 参数
+
+- hue 设置色调。允许的范围是 0 到 360。默认值为 0。
+- saturation 设置色彩饱和度。允许范围是 0 到 1。默认值是 0.5。
+- lightness 设置颜色亮度。允许范围是 0 到 1。默认值为 0.5。
+- mix 设置光源亮度的混合。默认情况下设置为 1.0。允许范围是 0.0 到 1.0。
+
+### 示例
+
+No such filter: 'colorize'
+
+官网英文文档虽然还有这个滤镜的说明,但最新版找不到这个滤镜,可能已经移除?
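+
+> 补充:colorcontrast、colorcorrect、colorize 以及后文的 colortemperature、exposure 等并非被移除,而是撰文时刚并入开发主线的新滤镜——官方在线文档跟随主线更新,发行版二进制往往落后,按官方 Changelog 推断应自 FFmpeg 4.4 起才随正式版提供(此为笔者推断,请以 Changelog 为准)。想批量确认本机 ffmpeg 带了哪些滤镜,可以用类似下面的小脚本(示意,假定 ffmpeg 已在 PATH 中):
+
+```python
+import subprocess
+
+
+def has_filter(name: str) -> bool:
+    """基于 `ffmpeg -filters` 的文本输出,粗略判断本机是否带有某个滤镜。"""
+    out = subprocess.run(["ffmpeg", "-hide_banner", "-filters"],
+                         capture_output=True, text=True).stdout
+    # 每行形如 " T.. colorize V->V ...",第二列是滤镜名
+    return any(line.split()[1:2] == [name] for line in out.splitlines())
+
+
+for name in ("colorcontrast", "colorcorrect", "colorize", "colortemperature"):
+    print(name, has_filter(name))
+```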
+ +## colorkey + +> https://ffmpeg.org/ffmpeg-filters.html#colorkey + +RGB 色彩空间颜色键控。 + +### 参数 + +- color 将被替换为透明的颜色。 +- similarity 与关键颜色的相似性百分比。0.01 仅匹配确切的键色,而 1.0 匹配所有键色。 +- blend 混合百分比。0.0 使像素完全透明或完全不透明。较高的值会导致半透明像素,而透明度越高,像素颜色与键颜色越相似。 + +### 示例 + +```python +_ = input(src).colorkey(color="white", similarity=0.02, blend=0).output(dst).run() +``` + +``` +ffmpeg -i testdata\i3.png -filter_complex "[0]colorkey=blend=0:color=white:similarity=0.02[tag0]" -map [tag0] testdata\media\i3_colorkey.png -y -hide_banner +[0.0732s] +``` + +#### 对比 + +黑色部分为透明。 + +[视频对比链接] + +## colorhold + +> https://ffmpeg.org/ffmpeg-filters.html#colorhold + +移除除特定颜色外的所有 RGB 颜色的所有颜色信息。 + +### 参数 + +- color 不会被中性灰色替代的颜色。 +- similarity 与上述颜色的相似度百分比。 0.01 仅匹配确切的键色,而 1.0 匹配所有键色。 +- blend 混合百分比。 0.0 使像素要么全灰,要么根本不灰。 值越高,保留的颜色越多。 + +### 示例 + +```python +_ = input(src).colorhold(color="red", similarity=0.05, blend=0.2).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorhold=blend=0.2:color=red:similarity=0.05[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorhold.mp4 -y -hide_banner +[0.5202s] +``` + +#### 对比 + +[视频对比链接] + +## colorlevels + +> https://ffmpeg.org/ffmpeg-filters.html#colorlevels + +用电平调整视频输入帧。 + +### 参数 + +rimin +gimin +bimin +aimin 调整红,绿,蓝和 alpha 输入黑点。允许的选项范围为 [-1.0,1.0]。默认值是 0。 +rimax +gimax +bimax +aimax 调整红,绿,蓝和 alpha 输入白点。允许的选项范围为 [-1.0,1.0]。默认值是 1。输入级别用于亮化高光 ( 亮色调 ),暗阴影 ( 暗色调 ),改变明暗色调的平衡。 +romin +gomin +bomin +aomin 调整红,绿,蓝和输出黑点。允许的选项范围是 [0,1.0]。默认值是 0。 +romax +gomax +bomax +aomax 调整红,绿,蓝和 alpha 输出白点。允许的选项范围是 [0,1.0]。默认值是 1。输出电平允许手动选择一个受限的输出电平范围。 + +### 示例 + +#### 画面变暗 + +```python +_ = input(src).colorlevels(rimin=0.558, gimin=0.058, bimin=0.058).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorlevels=bimin=0.058:gimin=0.058:rimin=0.558[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorlevels1.mp4 -y -hide_banner +[0.4217s] +``` + +#### 增加对比度 + +```python +_ = input(src).colorlevels(rimin=0.39, gimin=0.39, bimin=0.39, rimax=0.6, + gimax=0.6, bimax=0.6).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorlevels=bimax=0.6:bimin=0.39:gimax=0.6:gimin=0.39:rimax=0.6:rimin=0.39[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorlevels2.mp4 -y -hide_banner +[1.5766s] +``` + +#### 画面变亮 + +```python +_ = input(src).colorlevels(rimax=0.602, gimax=0.602, bimax=0.602).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorlevels=bimax=0.602:gimax=0.602:rimax=0.602[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorlevels3.mp4 -y -hide_banner +[1.5676s] +``` + +#### 增加明亮度 + +```python +_ = input(src).colorlevels(romin=0.5, gomin=0.5, bomin=0.5).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorlevels=bomin=0.5:gomin=0.5:romin=0.5[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorlevels4.mp4 -y -hide_banner +[1.6679s] +``` + +#### 对比 + +[视频对比链接] + +## colormatrix + +> https://ffmpeg.org/ffmpeg-filters.html#colormatrix + +转换色彩矩阵。 + +### 参数 + +- src +- dst 指定源和目标颜色矩阵。 必须同时指定这两个值。 + - ‘bt709’ BT.709 + - ‘fcc’ FCC + - ‘bt601’ BT.601 + - ‘bt470’ BT.470 + - ‘bt470bg’ BT.470BG + - ‘smpte170m’ SMPTE-170M + - ‘smpte240m’ SMPTE-240M + - ‘bt2020’ BT.2020 + +### 示例 + +```python +_ = 
input(src).colormatrix("bt601","smpte240m").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colormatrix=bt601:smpte240m[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colormatrix.mp4 -y -hide_banner +[0.4466s] +``` + +#### 对比 + +[视频对比链接] + +## colorspace + +> https://ffmpeg.org/ffmpeg-filters.html#colorspace + +转换色彩空间,转印特性或色彩原色。 输入视频的大小必须均匀。 + +### 参数 + +- all 一次指定所有颜色属性。 + - ‘bt470m’ BT.470M + - ‘bt470bg’ BT.470BG + - ‘bt601-6-525’ BT.601-6 525 + - ‘bt601-6-625’ BT.601-6 625 + - ‘bt709’ BT.709 + - ‘smpte170m’ SMPTE-170M + - ‘smpte240m’ SMPTE-240M + - ‘bt2020’ BT.2020 + +- space 指定输出色彩空间。 + - ‘bt709’ BT.709 + - ‘fcc’ FCC + - ‘bt470bg’ BT.470BG or BT.601-6 625 + - ‘smpte170m’ SMPTE-170M or BT.601-6 525 + - ‘smpte240m’ SMPTE-240M + - ‘ycgco’ YCgCo + - ‘bt2020ncl’ BT.2020 with non-constant luminance + +- trc 指定输出传输特性。 + - ‘bt709’ BT.709 + - ‘bt470m’ BT.470M + - ‘bt470bg’ BT.470BG + - ‘gamma22’ Constant gamma of 2.2 + - ‘gamma28’ Constant gamma of 2.8 + - ‘smpte170m’ SMPTE-170M, BT.601-6 625 or BT.601-6 525 + - ‘smpte240m’ SMPTE-240M + - ‘srgb’ SRGB + - ‘iec61966-2-1’ iec61966-2-1 + - ‘iec61966-2-4’ iec61966-2-4 + - ‘xvycc’ xvycc + - ‘bt2020-10’ BT.2020 for 10-bits content + - ‘bt2020-12’ BT.2020 for 12-bits content + +- primaries 指定输出颜色原色。 + - ‘bt709’ BT.709 + - ‘bt470m’ BT.470M + - ‘bt470bg’ BT.470BG or BT.601-6 625 + - ‘smpte170m’ SMPTE-170M or BT.601-6 525 + - ‘smpte240m’ SMPTE-240M + - ‘film’ film + - ‘smpte431’ SMPTE-431 + - ‘smpte432’ SMPTE-432 + - ‘bt2020’ BT.2020 + - ‘jedec-p22’ JEDEC P22 phosphors + +- range 指定输出颜色范围。 + - ‘tv’ TV (restricted) range + - ‘mpeg’ MPEG (restricted) range + - ‘pc’ PC (full) range + - ‘jpeg’ JPEG (full) range + +- format 指定输出颜色格式。 + - ‘yuv420p’ YUV 4:2:0 planar 8-bits + - ‘yuv420p10’ YUV 4:2:0 planar 10-bits + - ‘yuv420p12’ YUV 4:2:0 planar 12-bits + - ‘yuv422p’ YUV 4:2:2 planar 8-bits + - ‘yuv422p10’ YUV 4:2:2 planar 10-bits + - ‘yuv422p12’ YUV 4:2:2 planar 12-bits + - ‘yuv444p’ YUV 4:4:4 planar 8-bits + - ‘yuv444p10’ YUV 4:4:4 planar 10-bits + - ‘yuv444p12’ YUV 4:4:4 planar 12-bits + +- fast 进行快速转换,从而跳过 gamma/primary 校正。 这将大大减少 CPU 的使用,但是在数学上是不正确的。 要使输出与由 colormatrix 滤镜产生的输出兼容,请使用 fast = 1。 + +- dither 指定抖动模式。 + - ‘none’ 无 + - ‘fsb’ Floyd-Steinberg 抖动 + +- wpadapt 白点适应模式。 + - ‘bradford’ Bradford + - ‘vonkries’ von Kries + - ‘identity’ identity + +iall 一次覆盖所有输入属性。与 all 相同的接受值。 +ispace 覆盖输入色彩空间。与 space 相同的接受值。 +iprimaries 覆盖输入颜色原色。接受的值与 primaries 相同。 +itrc 覆盖输入传输特性。与 trc 相同的接受值。 +irange 覆盖输入颜色范围。与 range 相同的接受值。 + +过滤器将转印特性,颜色空间和颜色原色转换为指定的用户值。 如果未指定,则输出值将基于 “all” 属性设置为默认值。 如果也未指定该属性,则过滤器将记录错误。 默认情况下,输出颜色范围和格式与输入颜色范围和格式的值相同。 输入传输特性,颜色空间,颜色原色和颜色范围应在输入数据上设置。 如果缺少任何这些,过滤器将记录一个错误,并且不会进行任何转换。 + +### 示例 + +```python +_ = input(src).colorspace("smpte240m").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]colorspace=smpte240m[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_colorspace.mp4 -y -hide_banner + +Unsupported input primaries 2 (unknown) +``` + +报错,另外不太理解这个滤镜,今后如果涉及再补。 + +## colortemperature + +> https://ffmpeg.org/ffmpeg-filters.html#colortemperature + +调整视频中的色温以模拟环境色温的变化。 + +### 参数 + +- temperature 以开氏温度设置温度。允许的范围是 1000 到 40000。默认值是 6500K。 +- mix 设置混合并过滤输出。允许范围是 0 到 1。默认值为 1。 +- pl 设置保留亮度。允许的范围是 0 到 1。默认值是 0。 + +### 示例 + +No such filter: 'colortemperature' + +官网英文文档虽然还有这个滤镜的说明,但最新版找不到这个滤镜,可能已经移除? 
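+
+> 回头补充一点:上文 colorspace 的报错 Unsupported input primaries 2 (unknown) 通常不是滤镜本身的问题,而是源流没有携带色彩元数据——正如文档所说,输入属性缺失时不会进行任何转换。此时可以按 iall/ispace/iprimaries 等参数显式声明输入属性后再转换。下面是一个示意写法(假定源片按 BT.470BG 解释,实际取值需依素材而定;关键字参数能否如此映射取决于生成器实现,仅作演示):
+
+```python
+# 先用 iall 显式声明输入色彩属性,再指定目标属性做转换
+_ = input(src).colorspace(all="smpte240m", iall="bt470bg").output(dst).run()
+```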
+ +## convolution + +> https://ffmpeg.org/ffmpeg-filters.html#convolution + +应用 3x3、5x5、7x7 或水平/垂直(最多 49 个元素)的卷积。 + +### 参数 + +- 0m +- 1m +- 2m +- 3m 为每个通道设置矩阵。矩阵在方模式下是 9、25 或 49 个有符号整数的序列,在行模式下是 1 到 49 个奇数个有符号整数的序列。 + +- 0rdiv +- 1rdiv +- 2rdiv +- 3rdiv 为每个通道的计算值设置乘数。如果未设置或为 0,则它​​将是所有矩阵元素的总和。 + +- 0bias +- 1bias +- 2bias +- 3bias 为每个通道设置偏差。该值被加到相乘的结果上。用于使整个图像更亮或更暗。默认值为 0.0。 + +- 0mode +- 1mode +- 2mode +- 3mode 为每个通道设置矩阵模式。可以是 square, row, column。默认为 square。 + +### 示例 + +#### 锐化 + +```python +_ = input(src).convolution( + "0 -1 0 -1 5 -1 0 -1 0", + "0 -1 0 -1 5 -1 0 -1 0", + "0 -1 0 -1 5 -1 0 -1 0", + "0 -1 0 -1 5 -1 0 -1 0"). \ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0m=0 -1 0 -1 5 -1 0 -1 0:1m=0 -1 0 -1 5 -1 0 -1 0:2m=0 -1 0 -1 5 -1 0 -1 0:3m=0 -1 0 -1 5 -1 0 -1 0[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution1.mp4 -y -hide_banner +[0.4257s] +``` + +#### 对比 + +[视频对比链接] + +#### 模糊 + +```python +_ = input(src).convolution( + "1 1 1 1 1 1 1 1 1", + "1 1 1 1 1 1 1 1 1", + "1 1 1 1 1 1 1 1 1", + "1 1 1 1 1 1 1 1 1", + "1/9", "1/9", "1/9", "1/9"). \ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0m=1 1 1 1 1 1 1 1 1:0rdiv=1/9:1m=1 1 1 1 1 1 1 1 1:1rdiv=1/9:2m=1 1 1 1 1 1 1 1 1:2rdiv=1/9:3m=1 1 1 1 1 1 1 1 1:3rdiv=1/9[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution2.mp4 -y -hide_banner +[0.3846s] +``` + +#### 对比 + +[视频对比链接] + +#### 边缘强化 + +```python +_ = input(src).convolution( + "0 0 0 -1 1 0 0 0 0", + "0 0 0 -1 1 0 0 0 0", + "0 0 0 -1 1 0 0 0 0", + "0 0 0 -1 1 0 0 0 0", + "5", "1", "1", "1", "0", + "128", "128", "128"). \ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0bias=0:0m=0 0 0 -1 1 0 0 0 0:0rdiv=5:1bias=128:1m=0 0 0 -1 1 0 0 0 0:1rdiv=1:2bias=128:2m=0 0 0 -1 1 0 0 0 0:2rdiv=1:3bias=128:3m=0 0 0 -1 1 0 0 0 0:3rdiv=1[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution3.mp4 -y -hide_banner +[0.3373s] +``` + +#### 对比 + +[视频对比链接] + +#### 边缘检测 + +```python +_ = input(src).convolution( + "0 1 0 1 -4 1 0 1 0", + "0 1 0 1 -4 1 0 1 0", + "0 1 0 1 -4 1 0 1 0", + "0 1 0 1 -4 1 0 1 0", + "5", "5", "5", "1", "0", + "128", "128", "128"). \ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0bias=0:0m=0 1 0 1 -4 1 0 1 0:0rdiv=5:1bias=128:1m=0 1 0 1 -4 1 0 1 0:1rdiv=5:2bias=128:2m=0 1 0 1 -4 1 0 1 0:2rdiv=5:3bias=128:3m=0 1 0 1 -4 1 0 1 0:3rdiv=1[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution4.mp4 -y -hide_banner +[0.3418s] +``` + +#### 对比 + +[视频对比链接] + +#### 包括对角线的拉普拉斯边缘检测 + +```python +_ = input(src).convolution( + "1 1 1 1 -8 1 1 1 1", + "1 1 1 1 -8 1 1 1 1", + "1 1 1 1 -8 1 1 1 1", + "1 1 1 1 -8 1 1 1 1", + "5", "5", "5", "1", "0", + "128", "128", "0"). 
\ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0bias=0:0m=1 1 1 1 -8 1 1 1 1:0rdiv=5:1bias=128:1m=1 1 1 1 -8 1 1 1 1:1rdiv=5:2bias=128:2m=1 1 1 1 -8 1 1 1 1:2rdiv=5:3bias=0:3m=1 1 1 1 -8 1 1 1 1:3rdiv=1[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution5.mp4 -y -hide_banner +[0.3472s] +``` + +#### 对比 + +[视频对比链接] + +#### 浮雕效果 + +```python +_ = input(src).convolution( + "-2 -1 0 -1 1 1 0 1 2", + "-2 -1 0 -1 1 1 0 1 2", + "-2 -1 0 -1 1 1 0 1 2", + "-2 -1 0 -1 1 1 0 1 2"). \ + output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]convolution=0m=-2 -1 0 -1 1 1 0 1 2:1m=-2 -1 0 -1 1 1 0 1 2:2m=-2 -1 0 -1 1 1 0 1 2:3m=-2 -1 0 -1 1 1 0 1 2[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_convolution6.mp4 -y -hide_banner +[0.3639s] +``` + +#### 对比 + +[视频对比链接] + +## convolve + +> https://ffmpeg.org/ffmpeg-filters.html#convolve + +使用第二流作为脉冲在频域中应用视频流的 2D 卷积。 + +### 参数 + +- planes 设置通道 +- impulse 设置将处理哪些脉冲视频帧,first/all,默认 all + +### 示例 + +不懂,太专业了,素材也不好找。 + +## copy + +> https://ffmpeg.org/ffmpeg-filters.html#copy + +将输入视频源原样复制到输出。 + +### 参数 + +无。 + +### 示例 + +```python +_ = input(src).copy().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]copy[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_copy.mp4 -y -hide_banner +``` + +## coreimage + +> https://ffmpeg.org/ffmpeg-filters.html#coreimage + +在 Apple OSX 上使用 Apple 的 CoreImage API 在 GPU 上进行视频过滤。 + +硬件加速基于 OpenGL 上下文。通常,这意味着它是由视频硬件处理的。但是,存在基于软件的 OpenGL 实现,这意味着无法保证硬件处理。这取决于各自的 OSX。 + +Apple 提供了许多滤镜和图像生成器,其中包含多种选择。过滤器必须通过其名称及其选项进行引用。 + +无 Apple OSX 设备,略过。 + +## cover_rect + +> https://ffmpeg.org/ffmpeg-filters.html#cover_rect + +覆盖一个矩形对象。 + +### 参数 + +- cover 可选封面图片的文件路径,必须是 yuv420。 +- mode 覆盖模式。 + - cover 用提供的图像覆盖 + - blur 通过插值周围的像素覆盖 + +### 示例 + +找不到素材,暂略。 + +## crop + +> https://ffmpeg.org/ffmpeg-filters.html#crop + +将输入视频裁剪为给定的尺寸。 + +### 参数 + +- w, out_w 输出视频的宽度。默认为 iw。在过滤器配置期间,或者在发送 “w” 或 “out_w” 命令时,该表达式仅计算一次。 +- h, out_h 输出视频的高度。默认为 ih。在过滤器配置过程中,或者在发送 “h” 或 “out_h” 命令时,该表达式仅计算一次。w,h 表达式可以包含以下变量: + - x + - y x 和 y 的计算值,每一帧都会被计算 + - in_w + - in_h 输入的宽度和高度 + - iw + - ih 与 in_w 和 in_h 相同 + - out_w + - out_h 输出(裁剪)的宽度和高度 + - ow + - oh 与 out_w 和 out_h 相同 + - a 与 iw / ih 相同 + - sar 输入样本宽高比 + - dar 输入显示宽高比,与 (iw/ih) * sar 相同 + - hsub + - vsub 水平和垂直色度子样本值。例如,对于像素格式 “yuv422p”,hsub 为 2,vsub 为 1。 + - n 输入帧的编号,从 0 开始。 + - pos 输入框在文件中的位置,如果未知,则为 NAN + - t 时间戳记,以秒为单位。 如果输入的时间戳未知,则为 NAN。 + +- x 输入视频中输出视频左边缘的水平位置。默认为 (in_w-out_w)/2。每帧评估该表达式。 +- y 输入视频中输出视频顶部边缘的垂直位置。默认为 (in_h-out_h)/2。每帧评估该表达式。 +- keep_aspect 如果设置为 1,则通过更改输出样本的宽高比,将使输出显示的宽高比与输入相同。默认为 0。 +- exact 启用精确裁剪。如果启用,则将按照指定的确切宽度 / 高度 / x / y 裁剪子采样视频,并且不会四舍五入到最接近的较小值。默认为 0。 + +out_w 的表达式可能取决于 out_h 的值,out_h 的表达式可能取决于 out_w,但是它们不能取决于 x 和 y,因为 x 和 y 在 out_w 和 out_h 之后求值。 + +x 和 y 参数指定输出(非裁剪)区域左上角位置的表达式。将对每个帧进行评估。如果评估值无效,则将其近似为最接近的有效值。 + +x 的表达式可能取决于 y,而 y 的表达式可能取决于 x。 + +### 示例 + +```python +_ = input(src).crop("in_w/2","in_h/2","in_w/2","in_h/2").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]crop=in_w/2:in_h/2:in_w/2:in_h/2[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_crop01.mp4 -y -hide_banner +[0.3990s] +``` + +#### 对比 + +[视频对比链接] + +## cropdetect + +> https://ffmpeg.org/ffmpeg-filters.html#cropdetect + +自动检测裁剪大小。计算必要的裁剪参数,并通过记录系统打印推荐的参数。 检测到的尺寸对应于输入视频的非黑色区域。 + +### 参数 + +- limit 
设置较高的黑色值阈值,可以选择从零到所有值(对于 8 位格式为 255 )进行指定。大于设定值的强度值被认为是非黑色的。默认值为 24。您还可以指定一个介于 0.0 和 1.0 之间的值,该值将根据像素格式的位深进行缩放。 +- round 宽度 / 高度应被其整除的值。默认值为 16。偏移量会自动调整为使视频居中。使用 2 仅获得均匀尺寸。对大多数视频编解码器进行编码时,最好使用 16。 +- skip 设置跳过评估的初始帧数。默认值为 2。范围为 0 到 INT_MAX。最新版本 Option 'skip' not found。 +- reset_count, reset 设置计数器,该计数器确定 cropdetect 将在多少帧后重置之前检测到的最大视频区域,然后重新开始以检测当前的最佳作物区域。预设值为 0。当频道徽标使视频区域失真时,此功能很有用。0 表示 “从不重置 ”,并返回播放期间遇到的最大区域。 + + +### 示例 + +```python +_ = input(src).cropdetect(limit=200, round=20, reset_count=0).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]crop=in_w/2:in_h/2:in_w/2:in_h/2[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_crop01.mp4 -y -hide_banner +[0.3990s] +``` + +#### 对比 + +仅是打印了相关参数,对于合成视频无影响。 + +## cue + +> https://ffmpeg.org/ffmpeg-filters.html#cue + +将视频过滤延迟到给定的时钟时间戳。过滤器首先传递预滚动帧数,然后最多缓冲所有帧缓冲数并等待提示。到达提示后,它转发缓冲的帧以及输入中的任何后续帧。 + +该过滤器可用于同步多个 ffmpeg 进程的输出,以便实时输出设备(如 decklink )。通过将延迟置于过滤链和预缓冲帧中,该过程可以将数据传递到几乎达到目标时钟时间戳记之后立即输出。 + +不能保证完美的帧精度,但是对于某些用例来说,结果是足够好的。 + +### 参数 + +- cue 提示时间戳记,以 UNIX 时间戳记表示,以微秒为单位。默认值为 0。 +- preroll 要传递的内容持续时间(以秒为单位)作为预卷。默认值为 0。 +- buffer 等待提示之前要缓冲的内容的最大持续时间(以秒为单位)。默认值为 0。 + +### 示例 + +不太懂,暂略。 + +## curves + +> https://ffmpeg.org/ffmpeg-filters.html#curves + +根据曲线函数调整颜色。 + +该滤镜类似于 Adobe Photoshop 和 GIMP 曲线工具。每个组件(红色,绿色和蓝色)的值均由 N 个关键点定义,它们使用平滑曲线相互关联。x 轴表示来自输入帧的像素值,y 轴表示要为输出帧设置的新像素值。 + +默认情况下,分量曲线由两个点(0;0)和(1;1)定义。这将创建一条直线,其中每个原始像素值都被“调整”为其自己的值,这意味着图像不会发生变化。 + +过滤器使您可以重新定义这两点并添加更多内容。将定义一条新曲线(使用自然三次样条插值法)以平滑地通过所有这些新坐标。新定义的点必须在 x 轴上严格增加,并且它们的 x 和 y 值必须在 [0;1] 区间内。如果计算出的曲线恰好在向量空间之外,则将相应地剪切值。 + +### 参数 + +- preset 选择一种可用的颜色预设。 除了 r/g/b 参数外,还可以使用此选项。 在这种情况下,后面的选项优先于预设值。 + - ‘none’ 默认 + - ‘color_negative’ + - ‘cross_process’ + - ‘darker’ + - ‘increase_contrast’ + - ‘lighter’ + - ‘linear_contrast’ + - ‘medium_contrast’ + - ‘negative’ + - ‘strong_contrast’ + - ‘vintage’ +- master 设置主控关键点。 这些点将定义第二遍映射。 有时称为“亮度”或“值”映射。 它可以与 r/g/b/all 一起使用,因为它的作用类似于后处理 LUT。 +- red 设置红色组件的关键点。 +- green 同上 +- blue 同上 +- all 设置所有组件的关键点(不包括主组件)。 除其他关键点组件选项外,还可以使用。 在这种情况下,未设置的组件将在全部设置后退。 +- psfile 指定要从中导入设置的 Photoshop 曲线文件(.acv)。 +- plot 将曲线的 Gnuplot 脚本保存在指定文件中。 + +为了避免某些 filtergraph 语法冲突,需要使用以下语法定义每个关键点列表: + +``` +x0/y0 x1/y1 x2/y2 +``` + +### 示例 + +#### 稍微增加蓝色的中间水平 + +```python +_ = input(src).curves(blue='0/0 0.5/0.58 1/1').output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]curves=blue=0/0 0.5/0.58 1/1[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_curves1.mp4 -y -hide_banner +[0.6140s] +``` + +#### 对比 + +[视频对比链接] + +#### 复古效果 + +```python +_ = input(src).curves( + red='0/0.11 .42/.51 1/0.95', + green='0/0 0.50/0.48 1/1', + blue='0/0.22 .49/.44 1/0.8' +).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]curves=blue=0/0.22 .49/.44 1/0.8:green=0/0 0.50/0.48 1/1:red=0/0.11 .42/.51 1/0.95[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_curves2.mp4 -y -hide_banner +[0.4459s] +``` + +这个例子也可以用预设值实现: + +```python +_ = input(src).curves(preset="vintage").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]curves=preset=vintage[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_curves3.mp4 -y -hide_banner +[1.1474s] +``` + +#### 对比 + +[视频对比链接] + +## datascope + +> https://ffmpeg.org/ffmpeg-filters.html#datascope + +视频数据分析过滤器。 + +此过滤器显示部分视频的十六进制像素值。 + +### 参数 + +- size 设置输出视频大小。 +- x 设置从哪里拾取像素的x偏移量。 +- y 设置从像素拾取位置的y偏移。 +- 
mode 设定范围模式: + - mono 在黑色背景上绘制带有白色的十六进制像素值。 + - color 在黑色背景上用输入的视频像素颜色绘制十六进制像素值。 + - color2 在从输入视频中拾取的彩色背景上绘制十六进制像素值,以这种方式拾取文本颜色,使其始终可见。 +- axis 在视频的左侧和顶部绘制行号和列号。 +- opacity 设置背景不透明度。 +- format 设置显示编号格式。hex/dec,默认 hex。 +- components 设置要显示的像素分量。 默认情况下,显示所有像素分量。 + +### 示例 + +```python +_ = input(src).datascope(mode="color").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\v0_datascope.mp4 -filter_complex "[0]pad=w=2*iw[tag0];[tag0][1]overlay=x=w[tag1]" -vcodec h264_nvenc -map [tag1] C:\Users\Admin\Videos\contrast\v0_datascope_compare.mp4 -y -hide_banner +[0.5779s] +``` + +#### 对比 + +输出的视频全黑,不太懂。 + +## dblur + +> https://ffmpeg.org/ffmpeg-filters.html#dblur + +定向模糊滤镜。 + +### 参数 + +- angle 设置方向模糊的角度。 +- radius 设置方向模糊的半径。 +- planes 设置通道。默认全部。 + +### 示例 + +```python +_ = input(src).dblur(angle=30, radius=10).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]dblur=angle=30:radius=10[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_dblur.mp4 -y -hide_banner +[1.5746s] +``` + +#### 对比 + +[视频对比链接] + +## dctdnoiz + +> https://ffmpeg.org/ffmpeg-filters.html#dctdnoiz + +使用 2D DCT (频域滤波)对帧进行消噪。此过滤器为不是实时系统设计的。 + +### 参数 + +- sigma 设置噪声 sigma 常数。此 sigma 定义 3 * sigma 的硬阈值;低于此阈值的每个 DCT 系数(绝对值)都将被丢弃。如果需要更高级的过滤,请参见 expr。默认值为 0。 +- overlap 设置每个块的重叠像素数。由于过滤器的速度可能很慢,因此您可能希望降低此值,但代价是效率较低的过滤器和各种伪像的风险。如果重叠的值不允许处理整个输入的宽度或高度,则将显示警告,并且不会对相应的边框进行反色处理。默认值为 blockize-1,这是可能的最佳设置。 +- expr 设置系数因子表达式。对于 DCT 块的每个系数,该表达式将被评估为系数的乘数值。如果设置了此选项,则将忽略 sigma 选项。系数的绝对值可通过 c 变量访问。 +- n 使用位数设置 blocksize。1 << n 定义 blocksize,即已处理块的宽度和高度。默认值为 3 ( 8x8 ),对于 16x16 的 blocksize,可以将其提高到 4。请注意,更改此设置会对速度处理产生重大影响。同样,更大的 blocksize 并不一定意味着更好的去噪。 + +### 示例 + +```python +_ = input(src).dctdnoiz(4.5).output(dst).run() +_ = input(src).dctdnoiz(expr="gte(c, 4.5*3)").output(dst).run() # 等价 +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]dctdnoiz=4.5[tag0]" -vcodec h264_nvenc -map [tag0] testdata\media\v0_dctdnoiz1.mp4 -y -hide_banner +[5.9960s] +``` + +#### 对比 + +[视频对比链接] + +## deband + +> https://ffmpeg.org/ffmpeg-filters.html#deband + +从输入视频中删除条带失真。 通过将带状像素替换为参考像素的平均值来工作。 + +### 参数 + +- 1thr +- 2thr +- 3thr +- 4thr 设置每个通道的条带检测阈值。默认值为 0.02。有效范围是 0.00003 至 0.5。如果当前像素和参考像素之间的差异小于阈值,则将其视为带状。 +- range, r 条带检测范围(以像素为单位)。默认值为 16。如果为正,将使用 0 到设置值范围内的随机数。如果为负,将使用确切的绝对值。该范围定义了当前像素周围四个像素的平方。 +- direction, d 设置以弧度为单位的方向,将从该方向比较四个像素。如果为正,则将选择从 0 到设置方向的随机方向。如果为负,则将选择绝对值的精确值。例如,方向 0,-PI 或 -2 * PI 弧度将仅选择同一行上的像素,而 -PI / 2 将仅选择同一列上的像素。 +- blur, b 如果启用,则将当前像素与所有四个周围像素的平均值进行比较。默认启用。如果禁用,则将当前像素与周围的所有四个像素进行比较。如果只有与周围像素的所有四个差异均小于阈值,则该像素被视为带状。 +- coupling, c 如果启用,则当且仅当所有像素分量都被镶边时,例如,才改变当前像素。针对所有颜色分量触发条带检测阈值。默认设置为禁用。 + +### 示例 + +```python +_ = input(src).deband(r=32).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]deband=r=32[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_deband.mp4 -y -hide_banner +[1.5303s] +``` + +#### 对比 + +[视频对比链接] + +## deblock + +> https://ffmpeg.org/ffmpeg-filters.html#deblock + +从输入视频中删除阻塞的伪像。 + +### 参数 + +- filter 设置过滤器类型,可以是弱的或强的。默认值为强。这控制了应用哪种类型的解块。 +- block 设置块的大小,允许的范围是 4 到 512。默认值为 8。 +- alpha +- beta +- gamma +- delta 设置阻塞检测阈值。允许的范围是 0 到 1。默认值为:alpha 为 0.098,其余为 0.05。使用较高的阈值可提供更多的解块强度。设置 Alpha 控制阈值检测在块的精确边缘。其余选项可控制边缘附近的阈值检测。下方 / 上方或左侧 / 右侧的每个。将其中任何一个设置为 0 将禁用解块。 +- planes 设置通道。 + +### 示例 + +```python +_ = input(src).deblock(filter="weak", 
block=4).output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]deblock=block=4:filter=weak[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_deblock.mp4 -y -hide_banner
+[1.4129s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## decimate
+
+> https://ffmpeg.org/ffmpeg-filters.html#decimate
+
+定期删除重复的帧。
+
+### 参数
+
+- cycle 设置将被删除的帧数。设置为 N 意味着每批 N 帧中的一帧将被删除。默认是 5。
+- dupthresh 设置重复检测阈值。如果一个帧的差异度量小于或等于这个值,那么它被判定为重复。默认是 1.1
+- scthresh 设置场景变化阈值。默认值为 15。
+- blockx
+- blocky 设置度量计算期间使用的 x 和 y 轴块的大小。较大的块可提供更好的噪声抑制,但对小动作的检测也较差。必须是 2 的幂。默认值为 32。
+- ppsrc 将主要输入标记为预处理输入,并激活干净的源输入流。这允许使用各种过滤器对输入进行预处理,以帮助度量计算,同时保持帧选择无损。设置为 1 时,第一个流用于预处理输入,第二个流是干净的源,从中选择保留的帧。默认值为 0。
+- chroma 设置在度量计算中是否考虑色度。默认值为 1。
+
+### 示例
+
+```python
+_ = input(src).decimate(cycle=10).output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]decimate=cycle=10[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_decimate.mp4 -y -hide_banner
+[0.4016s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## deconvolve
+
+> https://ffmpeg.org/ffmpeg-filters.html#deconvolve
+
+使用第二流作为脉冲,在频域中对视频流进行 2D 反卷积。
+
+### 参数
+
+- planes 设置处理通道
+- impulse 设置将处理哪些脉冲视频帧,first/all,默认 all
+- noise 设置做除法运算时使用的噪声值。默认值为 0.0000001。当宽度和高度不相同且不是 2 的幂时,或者卷积前的流本身有噪声时,此选项很有用
+
+### 示例
+
+不太懂,略。
+
+## dedot
+
+> https://ffmpeg.org/ffmpeg-filters.html#dedot
+
+减少视频的交叉亮度(dot-crawl)和交叉颜色(rainbows)。
+
+### 参数
+
+- m 设置操作模式。dotcrawl 用于降低交叉亮度,rainbows 用于降低交叉颜色,两者可以同时启用。
+- lt 设置空间亮度阈值。较低的值会增强交叉亮度的消除。
+- tl 设置时间亮度的容忍度。较高的值会增强交叉亮度的消除。
+- tc 设置色度时间变化的容限。较高的值会增强交叉色的消除。
+- ct 设置时间色度阈值。较低的值会增强交叉色的消除。
+
+### 示例
+
+```python
+_ = input(src).dedot(m="rainbows").output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]dedot=m=rainbows[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_dedot.mp4 -y -hide_banner
+[0.4043s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## deflate
+
+> https://ffmpeg.org/ffmpeg-filters.html#deflate
+
+对视频应用 deflate(收缩)效果。该滤镜计算 local(3x3)平均值时只考虑低于中心像素的值,并用该平均值替换像素。
+
+### 参数
+
+- threshold0
+- threshold1
+- threshold2
+- threshold3 限制每个通道的最大变化,默认值为 65535。如果为 0,则通道将保持不变。
+
+### 示例
+
+```python
+_ = input(src).deflate().output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]deflate[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_deflate.mp4 -y -hide_banner
+[0.4110s]
+```
+
+#### 对比
+
+不太理解。
+
+[视频对比链接]
+
+## deflicker
+
+> https://ffmpeg.org/ffmpeg-filters.html#deflicker
+
+消除帧间的时域亮度波动(闪烁)。
+
+### 参数
+
+- size 以帧为单位设置移动平均滤波器的大小。默认值为 5。允许的范围为 2-129。
+- mode 设置平均模式以平滑时间亮度变化。
+ - ‘am’ Arithmetic mean
+ - ‘gm’ Geometric mean
+ - ‘hm’ Harmonic mean
+ - ‘qm’ Quadratic mean
+ - ‘cm’ Cubic mean
+ - ‘pm’ Power mean
+ - ‘median’ Median
+- bypass 实际上不修改帧。当只需要元数据时很有用。
+
+### 示例
+
+```python
+_ = input(src).deflicker(size=10, mode="qm").output(dst).run()
+```
+
+```
+ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]deflicker=mode=qm:size=10[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_deflicker.mp4 -y -hide_banner
+[1.1614s]
+```
+
+#### 对比
+
+[视频对比链接]
+
+## dejudder
+
+> https://ffmpeg.org/ffmpeg-filters.html#dejudder
+
+删除部分隔行电视转播的内容所产生的抖动。
+
+抖动可能由例如 pullup 滤波器引入。如果原始源是部分电视 pullup 的内容,则 dejudder 的输出将具有可变的帧速率,并可能更改容器记录的帧速率。除此之外,此过滤器不会影响恒定帧率的视频。
+
+### 参数
+
+- cycle 指定抖动在其上重复的窗口的长度。接受大于 1 的任何整数
+- 4 如果原件以 24 fps 到 30 fps 的速度转换(从电影胶片到 NTSC
),默认 +- 5 如果原件以 25 fps 到 30 fps 的速度转播(从 PAL 到 NTSC ) +- 20 如果两者混合。 + +### 示例 + +找不到适用素材。 + +## delogo + +> https://ffmpeg.org/ffmpeg-filters.html#delogo + +通过简单地对周围像素进行插值来隐藏电视台徽标。 只需设置一个覆盖徽标的矩形,然后观察它消失(有时会出现更难看的东西,结果可能会有所不同)。 + +### 参数 + +- x +- y 必须指定徽标的左上角坐标。 +- w +- h 必须指定要清除的徽标的宽度和高度。 +- band, t 指定矩形的模糊边缘的厚度(添加到 w 和 h )。默认值为 1。不建议使用此选项,不再建议设置更高的值。 +- show 设置为 1 时,屏幕上会绘制一个绿色矩形,以简化查找正确的 x,y,w 和 h 参数的过程。默认值为 0。矩形绘制在最外面的像素上,这些像素将(部分)替换为插值。在每个方向上紧接此矩形之外的下一个像素的值将用于计算矩形内的插值像素值。 + +### 示例 + +暂略。 + +## derain + +> https://ffmpeg.org/ffmpeg-filters.html#derain + +使用基于卷积神经网络的 derain 方法去除输入图像/视频中的雨水/雾。 + +### 参数 + +- filter_type derain/dehaze +- dnn_backend native/tensorflow +- model 设置模型文件的路径,以指定网络体系结构及其参数。请注意,不同的后端使用不同的文件格式。TensorFlow 和 native 后端只能按其格式加载文件。 + +### 示例 + +缺少素材,略。 + +## deshake + +> https://ffmpeg.org/ffmpeg-filters.html#deshake + +消除抖动/防抖,尝试解决水平和/或垂直偏移的微小变化。该滤镜有助于消除手持相机,撞击三脚架,在车辆上行驶等引起的相机抖动。 + +### 参数 + +- x +- y +- w +- h 指定一个限制运动矢量搜索的矩形区域。如果需要,可以将运动矢量的搜索限制在由其左上角,宽度和高度定义的帧的矩形区域中。这些参数与可用于可视化边界框位置的绘图框过滤器具有相同的含义。当运动矢量搜索可能会使**对象在帧内同时运动对于摄像机运动造成混淆**时,此功能很有用。如果 x,y,w 和 h 中的任何一个或全部设置为 -1,则使用整个帧。这样就可以设置以后的选项,而无需为运动矢量搜索指定边界框。**默认搜索整个帧**。 +- rx +- ry 在 0 和 64 像素范围内,指定在 x 和 y 方向上的最大移动范围。默认值 16。 +- edge 指定如何生成像素以填充帧边缘的空白。 + - ‘blank, 0’ 填零 + - ‘original, 1’ 原始图像 + - ‘clamp, 2’ 拉伸边值 + - ‘mirror, 3’ 镜面边缘,默认 +- blocksize 指定用于运动搜索的块大小。范围 4-128 像素,默认为 8。 +- contrast 指定块的对比度阈值。仅考虑具有超过指定对比度(最暗像素与最亮像素之间的差异)的块。范围 1-255,默认值为 125。 +- search 指定搜索策略。 + - exhaustive 详尽搜索 + - less 非详尽搜索 +- filename 如果设置,则将运动搜索的详细日志写入指定的文件。 + +### 示例 + +该滤镜假定相机是相对物体是静止的,只有轻微抖动,如果相机或者物体是运动的,效果很差,甚至会加剧抖动幅度。 + +```python +_ = input(data.SHAKE1).deshake(x=20, y=20, w=100, h=100).output(data.TEST_OUTPUTS_DIR / 'deshake.mp4').run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i C:\Users\Admin\Videos\FFmpeg\InputsData\s1.MOV -filter_complex "[0]deshake=h=100:w=100:x=20:y=20[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\FFmpeg\OutputsData\deshake.mp4 -y -hide_banner +[29.4678s] +``` + +## despill + +> https://ffmpeg.org/ffmpeg-filters.html#despill + +消除由绿屏或蓝屏的反射颜色引起的不必要的前景色污染。 + +### 参数 + +- type 设置消除类型。 +- mix 设置 spillmap 的生成方式。 +- expand 设置要除去仍然残留的溢出物的量。 +- red 控制溢出区域中的红色量。 +- green 控制溢出区域的绿色量。绿屏应该为 -1。 +- blue 控制溢出区域中的蓝色量。蓝屏应为 -1。 +- brightness 控制溢出区域的亮度,并保留颜色。 +- alpha 从生成的 spillmap 修改 alpha。 + +### 示例 + +暂略,缺少素材。 + +## detelecine + +> https://ffmpeg.org/ffmpeg-filters.html#detelecine + +对电视电影操作进行精确的逆运算。 它要求使用模式选项指定的预定义模式,该模式必须与传递给电视电影滤镜的模式相同。 + +### 参数 + +- first_field top/bottom +- pattern 一串数字,代表希望应用的下拉模式。预设值为 23。 +- start_frame 一个数字,表示第一帧相对于电视电影图案的位置。如果流被剪切,将使用此方法。默认值为 0。 + +### 示例 + +略,不懂。 + +## dilation + +> https://ffmpeg.org/ffmpeg-filters.html#dilation + +将膨胀效果应用于视频。该滤镜将像素替换为 local ( 3x3 )最大值。 + +### 参数 + +- threshold0 +- threshold1 +- threshold2 +- threshold3 限制每个通道的最大变化,默认值为 65535。如果为 0,则通道将保持不变。 +- coordinates 指定要参考像素的标志。 默认值为 255,即全部使用了八个像素。 + +### 示例 + +略,不懂。 + +## displace + +> https://ffmpeg.org/ffmpeg-filters.html#displace + +如第二和第三输入流所示,移动像素。它接受三个输入流并输出一个流,第一个输入是源,第二个和第三个输入是位移图。第二个输入指定沿 x 轴位移像素的数量,而第三个输入指定沿 y 轴位移像素的数量。 如果位移图流之一终止,则将使用该位移图的最后一帧。请注意,位移贴图一旦生成,便可以反复使用。 + +### 参数 + +- edge 设置超出范围的像素的位移行为。 + - blank 缺少的像素将替换为黑色像素。 + - smear 相邻像素会散开以替换丢失的像素。默认。 + - wrap 超出范围的像素将被包裹,因此它们指向另一侧的像素。 + - mirror 超出范围的像素将替换为镜像像素。 + +### 示例 + +不懂,过于复杂。略。 + +## dnn_processing + +> https://ffmpeg.org/ffmpeg-filters.html#dnn_processing + +用深度神经网络进行图像处理。 它与另一个过滤器一起使用,该过滤器将帧的像素格式转换为 dnn 网络所需的格式。 + +### 参数 + +- dnn_backend native/tensorflow/openvino +- model 设置模型文件的路径,以指定网络体系结构及其参数。 请注意,不同的后端使用不同的文件格式。 
TensorFlow,OpenVINO 和 native 后端只能按其格式加载文件。 +- input 设置 DNN 网络的输入名称。 +- output 设置 DNN 网络的输出名称。 +- async 如果设置了 DNN,则使用异步执行(默认值:set);如果后端不支持异步,则回滚以同步执行。 + +### 示例 + +暂略。 + +## drawbox + +> https://ffmpeg.org/ffmpeg-filters.html#drawbox + +在输入图像上绘制一个彩色框。 + +### 参数 + +- x +- y 这些表达式指定框的左上角坐标。默认为 0。 +- w +- h 指定方框的宽度和高度的表达式;如果为 0,则解释为输入的宽度和高度。默认值为 0。 +- color 指定框的颜色。如果使用特殊值反转,则框边颜色与亮度反转的视频相同。 +- thickness, t 该表达式设置框边缘的厚度。填充值将创建一个填充框。预设值为 3。 +- replace 如果输入包含 Alpha,则适用。值为 1 时,涂色框的像素将覆盖视频的颜色和 Alpha 像素。默认值为 0,它将框与输入合成,而视频的 Alpha 保持不变。 + +x,y,w,h,t 表达式可用变量,允许相互引用: + + - dar 输入显示宽高比,与(w / h)* sar相同。 + - hsub + - vsub 水平和垂直色度子样本值。 例如,对于像素格式“ yuv422p”,hsub为2,vsub为1。 + - ih + - iw 输入的宽度和高度。 + - sar 输入样本的宽高比。 + - x + - y 绘制框的x和y偏移坐标。 + - w + - h 绘制框的宽度和高度。 + - t 绘制框的厚度。 + +### 示例 + +#### 在边缘画一个黑框 + +```python +_ = input(src).drawbox().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawbox[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawbox1.mp4 -y -hide_banner +[1.2132s] +``` + +##### 对比 + +[视频对比链接] + +#### 画一个红色半透明框 + +```python +_ = input(src).drawbox(10, 20, 200, 60, "red@0.5").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawbox=10:20:200:60:red@0.5[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawbox2.mp4 -y -hide_banner +[0.6584s] +``` + +##### 对比 + +[视频对比链接] + +#### 画框填充粉色 + +```python +_ = input(src).drawbox(x=10, y=10, w=100, h=100, color="pink@0.5", thickness="fill").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawbox=color=pink@0.5:h=100:thickness=fill:w=100:x=10:y=10[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawbox3.mp4 -y -hide_banner +[0.4018s] +``` + +##### 对比 + +[视频对比链接] + +#### 表达式画框 + +```python +_ = input(src).drawbox(x="-t", y="0.5*(ih-iw/2.4)-t", w="iw+t*2", h="iw/2.4+t*2", + thickness=2, color="red").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawbox=color=red:h=iw/2.4+t*2:thickness=2:w=iw+t*2:x=-t:y=0.5*(ih-iw/2.4)-t[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawbox3.mp4 -y -hide_banner +[0.3558s] +``` + +##### 对比 + +[视频对比链接] + +## drawgraph + +> https://ffmpeg.org/ffmpeg-filters.html#drawgraph + +使用输入的视频元数据绘制图形。 + +### 参数 + +- m1 设置第一帧元数据键,从中将使用元数据值绘制图形。 +- fg1 设置第一个前景色。 +- m2 +- fg2 +- m3 +- fg3 +- m4 +- fg4 +- min 设置元数据值的最小值。 +- max 设置元数据值的最大值。 +- bg 设置图形背景颜色。 默认为白色。 +- mode 设置图形模式。bar/dot/line,默认 line。 +- slide 设置幻灯片模式。 + - frame 当到达右边框时绘制新帧。默认。 + - replace 用新列替换旧列。 + - scroll 从右向左滚动。 + - rscroll 从左向右滚动。 + - picture 画单张画。 +- size 设置图形视频的大小。默认 900x256 +- rate, r 设置输出帧速率。缺省值为 25。 + +前景色表达式可以使用以下变量: +- MIN +- MAX 元数据值的最大/小值。 +- VAL 当前元数据键值。 + +颜色定义为 0xAABBGGRR。 + +### 示例 + +不懂,略。 + +## drawgrid + +> https://ffmpeg.org/ffmpeg-filters.html#drawgrid + +在输入图像上绘制网格。 + +### 参数 + +- x +- y 这些表达式指定框的左上角坐标。默认为 0。 +- w +- h 指定方框的宽度和高度的表达式;如果为 0,则解释为输入的宽度和高度。默认值为 0。 +- color 指定框的颜色。如果使用特殊值反转,则框边颜色与亮度反转的视频相同。 +- thickness, t 该表达式设置框边缘的厚度。填充值将创建一个填充框。预设值为 3。 +- replace 如果输入包含 Alpha,则适用。值为 1 时,涂色框的像素将覆盖视频的颜色和 Alpha 像素。默认值为 0,它将框与输入合成,而视频的 Alpha 保持不变。 + +x,y,w,h,t 表达式可用变量,允许相互引用: + + - dar 输入显示宽高比,与(w / h)* sar相同。 + - hsub + - vsub 水平和垂直色度子样本值。 例如,对于像素格式“ yuv422p”,hsub为2,vsub为1。 + - ih + - iw 输入的宽度和高度。 + - sar 输入样本的宽高比。 + - x + - y 绘制框的x和y偏移坐标。 + - w + - h 绘制框的宽度和高度。 + - t 绘制框的厚度。 + +### 示例 + 
+```python +_ = input(src).drawgrid(w=100, h=100, thickness=2, color="red@0.5").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawgrid=color=red@0.5:h=100:thickness=2:w=100[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawbox3.mp4 -y -hide_banner +[1.1525s] +``` + +#### 对比 + +[视频对比链接] + +## drawtext + +> https://ffmpeg.org/ffmpeg-filters.html#drawtext + +使用 libfreetype 库在视频顶部的指定文件中绘制文本字符串或文本。 + +### 参数 + +- box 用于使用背景色在文本周围绘制框。该值必须为 1 启用或 0 禁用。box 的默认值为 0。 +- boxborderw 使用 boxcolor 设置要在框周围绘制的边框的宽度。boxborderw 的默认值为 0。 +- boxcolor 用于在文本周围绘制框的颜色。默认白色。 +- line_spacing 使用框设置要在框周围绘制的边框的行距(以像素为单位)。 line_spacing 的默认值为 0。 +- borderw 使用 bordercolor 设置要在文本周围绘制的边框的宽度。borderw 的默认值为 0。 +- bordercolor 设置用于在文本周围绘制边框的颜色。默认黑色。 +- expansion 选择文本的扩展方式。none/strftime/normal。默认 normal。 +- basetime 设置计数的开始时间。值以微秒为单位。仅在不建议使用的 strftime 扩展模式下应用。要在正常扩展模式下进行仿真,请使用 pts 函数,并提供开始时间(以秒为单位)作为第二个参数。 +- fix_bounds 如果为 true,检查并修复文本坐标以避免剪贴。 +- fontcolor 用于绘制字体的颜色。默认黑色。 +- fontcolor_expr 以与文本相同的方式扩展以获得动态 fontcolor 值的字符串。默认情况下,此选项的值为空,并且不进行处理。设置此选项后,它将覆盖 fontcolor 选项。 +- font 用于绘制文本的字体系列。 默认 Sans。 +- fontfile 用于绘制文本的字体文件。 该路径必须包括在内。 如果禁用 fontconfig 支持,则此参数是必需的。 +- alpha 应用 Alpha 混合绘制文本。该值可以是 0.0 到 1.0 之间的数字。该表达式也接受相同的变量 x,y。默认值为 1。请参阅 fontcolor_expr。 +- fontsize 用于绘制文本的字体大小。 fontsize 的默认值为 16。 +- text_shaping 如果设置为 1,则在绘制文本之前,先尝试使其变形(例如,反转从右到左文本的顺序并连接阿拉伯字符)。否则,只需完全按照给定的文字绘制即可。默认情况下为 1 (如果支持)。 +- ft_load_flags 用于加载字体的标志。这些标志对应 libfreetype 支持的标志,是以下值的组合: + - default + - no_scale + - no_hinting + - render + - no_bitmap + - vertical_layout + - force_autohint + - crop_bitmap + - pedantic + - ignore_global_advance_width + - no_recurse + - ignore_transform + - monochrome + - linear_design + - no_autohint +- shadowcolor 用于在绘制的文本后面绘制阴影的颜色。 +- shadowx +- shadowy 文本阴影位置相对于文本位置的 x 和 y 偏移量。它们可以是正值或负值。两者的默认值为“0”。 +- start_number n/frame_num 变量的起始帧号。系统默认值为“0”。 +- tabsize 用于呈现 tab 的空格数的大小。 预设值为 4。 +- timecode 将初始时间码表示形式设置为 "hh:mm:ss[:;.]ff"。 它可以与或不与 text 参数一起使用。 必须指定 timecode_rate 选项。 +- timecode_rate 设置时间码帧速率(仅时间码)。 值将四舍五入到最接近的整数。 最小值为 “1”。 帧速率 30 和 60 支持丢帧时间码。 +- tc24hmax 如果设置为 1,则 timecode 选项的输出将在 24 小时左右结束。默认值为 0 (禁用)。 +- text 要绘制的文本字符串。文本必须是 UTF-8 编码字符的序列。如果未使用参数 textfile 指定文件,则此参数是必需的。 +- textfile 包含要绘制的文本的文本文件。文本必须是 UTF-8 编码字符的序列。如果未使用参数 text 指定文本字符串,则此参数是必需的。如果同时指定了文本和文本文件,则会引发错误。 +- reload 如果设置为 1,则文本文件将在每帧之前重新加载。 请确保以原子方式进行更新,否则可能会被部分读取,甚至失败。 +- x +- y 这些表达式指定在视频帧中将在其中绘制文本的偏移量。它们相对于输出图像的顶部 / 左侧边框。x 和 y 的默认值为 “0”。 + +x 和 y 的参数是包含以下常量和函数的表达式: + - dar 输入显示宽高比,与(w / h)* sar 相同。 + - hsub + - vsub 水平和垂直色度子样本值。 例如,对于像素格式 “yuv422p”,hsub 为 2,vsub 为 1。 + - line_h, lh 每行文字的高度。 + - main_h, h, H + - main_w, w, W 输入高度/宽度。 + - max_glyph_a, ascent 对于所有渲染的字形,从基线到用于放置字形轮廓点的最高 / 上网格坐标的最大距离。由于网格的方向( Y 轴朝上),因此该值为正值。 + - max_glyph_d, descent 对于所有渲染的字形,从基线到用于放置字形轮廓点的最低网格坐标的最大距离。由于网格的方向,这是一个负值,Y 轴朝上。 + - max_glyph_h 最大字形高度,即渲染文本中包含的所有字形的最大高度,它等效于 ascent - descent。 + - max_glyph_w 最大字形宽度,即所呈现文本中包含的所有字形的最大宽度。 + - n 输入帧的数量,从 0 开始。 + - rand(min, max) 返回介于最小值和最大值之间的随机数。 + - sar 输入样本的宽高比。 + - t 以秒为单位的时间戳,如果输入的时间戳未知,则为 NAN。 + - text_h, th + - text_w, tw 呈现文字的宽度/高度。 + - x + - y 绘制文本的位置的 x 和 y 偏移坐标。x/y 可以相互引用。 + - pict_type 当前帧图片类型的一个字符描述。 + - pkt_pos 当前数据包在输入文件或流中的位置(以字节为单位,从输入开始算起)。值 -1 表示此信息不可用。 + - pkt_duration 当前数据包的持续时间(以秒为单位)。 + - pkt_size 当前数据包的大小(以字节为单位)。 + +## 文本表达式 + +设置为 none,则逐字打印文本。设置为 normal,则使用以下扩展机制: + +反斜杠字符 “\”,后跟任何字符,始终扩展为第二个字符。 + +`%{...}` 形式的序列被扩展。大括号之间的文本是函数名称,后面可能跟有以 “:” 分隔的参数。如果参数包含特殊字符或定界符 “:”、“}”,则应将其转义。 + +请注意,它们也可能必须转义为 filter 参数字符串中的 text 
选项的值和 filtergraph 描述中的 filter 参数,并且还可能要转义为多达四个转义级别的 shell,使用文本文件可以避免这些问题。 + +以下函数可用: + +- expr, e 表达式求值结果。它必须接受一个参数,指定要计算的表达式,该参数接受与 x 和 y 值相同的常量和函数。注意,并不是所有常量都应该被使用,例如,在计算表达式时文本大小是未知的,因此常量 text_w 和 text_h 将有一个未定义的值。 +- expr_int_format, eif 第一个参数是要评估的表达式,就像 expr 函数一样。第二个参数指定输出格式。允许的值为 “x”,“X”,“d” 和 “u”。它们与 printf 函数中的对待完全相同。第三个参数是可选的,它设置输出所占据的位置数。它可用于从左侧添加零填充。 +- gmtime 过滤器运行的时间,用 UTC 表示。它可以接受一个参数: strftime() 格式字符串。 +- localtime 过滤器运行的时间,以当地时区表示。它可以接受一个参数: strftime() 格式字符串。 +- metadata 帧元数据。接受一两个参数。第一个参数是必需参数,它指定元数据密钥。第二个参数是可选的,并指定默认值,当未找到元数据键或该参数为空时使用。可以通过检查以运行 ffprobe -show_frames 打印的每个帧部分中包含的 TAG 开头的条目来标识可用的元数据。也可以使用在过滤器中生成的字符串元数据(通向绘画文本过滤器)。 +- n, frame_num 帧号,从0开始。 +- pict_type 当前图片类型的一个字符描述。 +- pts 当前帧的时间戳。它最多可以有三个参数。第一个参数是时间戳的格式,它默认将转换秒作为具有微秒精度的十进制数,hms 表示格式化的 [-]HH:MM:SS。具有毫秒精度的 mmm 时间戳。gmtime 表示格式为 UTC 时间的帧的时间戳,localtime 表示格式为本地时区时间的帧的时间戳。第二个参数是添加到时间戳的偏移量。如果格式设置为 hms,则可以提供第三个参数 24HH,以 24 小时格式 (00-23) 表示已格式化时间戳的小时部分。如果格式设置为 localtime 或 gmtime,则可以提供第三个参数:strftime() 格式字符串。缺省情况下,格式为 YYYY-MM-DD HH:MM:SS。 + +## 改变进行中的参数 + +``` +sendcmd=c='56.0 drawtext reinit fontsize=56\:fontcolor=green\:text=Hello\\ World' +``` + +### 示例 + +#### 简单文本 + +```python +_ = input(src).drawtext(fontfile=f1, text="测试绘制文本", fontsize=36).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=36:text=测试绘制文本[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext.mp4 -y -hide_banner +[0.5606s] +``` + +> 转义相当复杂。 + +#### 对比 + +[视频对比链接] + +#### 指定颜色、背景框、背景色 + +```python +_ = input(src).drawtext(fontfile=f1, text="测试绘制文本", x=100, y=50, + fontsize=24, fontcolor="yellow@0.2", box=1, + boxcolor="red@0.2").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=box=1:boxcolor=red@0.2:fontcolor=yellow@0.2:fontfile=testdata\\\\f1.ttf:fontsize=24:text=测试绘制文本:x=100:y=50[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext2.mp4 -y -hide_banner +[0.3209s] +``` + +#### 对比 + +[视频对比链接] + +#### 居中显示文本 + +```python +_ = input(src).drawtext(fontfile=f1, text="测试绘制文本", fontsize=36, + x="(w-text_w)/2", y="(h-text_h)/2").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=36:text=测试绘制文本:x=(w-text_w)/2:y=(h-text_h)/2[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext3.mp4 -y -hide_banner +[0.3359s] +``` + +#### 对比 + +[视频对比链接] + +#### 每秒变换随机位置 + +```python +_ = input(src).drawtext(fontfile=f1, text="测试绘制文本", fontsize=36, + x="if(eq(mod(t,1),0),rand(0,(w-text_w)),x)", + y="if(eq(mod(t,1),0),rand(0,(h-text_h)),y)").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=36:text=测试绘制文本:x=if(eq(mod(t\,1)\,0)\,rand(0\,(w-text_w))\,x):y=if(eq(mod(t\,1)\,0)\,rand(0\,(h-text_h))\,y)[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext4.mp4 -y -hide_banner +[0.3033s] +``` + +#### 对比 + +[视频对比链接] + +#### 从左往右滚动文本 + +```python +_ = input(src).drawtext(fontfile=f1, text="Show a text line sliding from right to " + "left in the last row of the video frame. 
The file" + " LONG_LINE is assumed to contain a single line " + "with no newlines.", fontsize=16, + y="h-line_h", x="-50*t").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=16:text=Show a text line sliding from right to left in the last row of the video frame. The file LONG_LINE is assumed to contain a single line with no newlines.:x=-50*t:y=h-line_h[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext5.mp4 -y -hide_banner +[0.6457s] +``` + +#### 对比 + +[视频对比链接] + +#### 从下往上滚动文本 + +```python +_ = input(src).drawtext(fontfile=f1, text="Show the text off the bottom of " + "the frame and scroll up.", + fontsize=16, y="h-20*t").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=16:text=Show the text off the bottom of the frame and scroll up.:y=h-20*t[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext6.mp4 -y -hide_banner +[0.3349s] +``` + +#### 对比 + +[视频对比链接] + +#### 中心绘制绿色的字母 + +```python +_ = input(src).drawtext(fontfile=f1, text="G", fontcolor="green", fontsize=36, + x="(w-max_glyph_w)/2", y="h/2-ascent").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontcolor=green:fontfile=testdata\\\\f1.ttf:fontsize=36:text=G:x=(w-max_glyph_w)/2:y=h/2-ascent[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext7.mp4 -y -hide_banner +[0.3204s] +``` + +#### 对比 + +[视频对比链接] + +#### 间歇性显示 + +```python +_ = input(src).drawtext(fontfile=f1, text="间歇性显示", fontsize=36, + x=100, y="x/dar", enable="lt(mod(t,1),0.5)").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=enable=lt(mod(t\,1)\,0.5):fontfile=testdata\\\\f1.ttf:fontsize=36:text=间歇性显示:x=100:y=x/dar[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext8.mp4 -y -hide_banner +[0.4989s] +``` + +#### 对比 + +[视频对比链接] + +#### 根据视频分辨率调整 + +```python +_ = input(src).drawtext(fontfile=f1, text="测试绘制文本", + fontsize="h/10", x="(w-text_w)/2", + y="(h-text_h*2)").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=h/10:text=测试绘制文本:x=(w-text_w)/2:y=(h-text_h*2)[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext9.mp4 -y -hide_banner +[0.3431s] +``` + +#### 对比 + +[视频对比链接] + +#### 显示实时时间 + +```python +_ = input(src).drawtext(fontfile=f1, text="%{localtime:%a %b %d %Y}", + fontsize=36).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]drawtext=fontfile=testdata\\\\f1.ttf:fontsize=36:text=%{localtime\\:%a %b %d %Y}[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_drawtext10.mp4 -y -hide_banner +[0.3401s] +``` + +#### 对比 + +[视频对比链接] + +## edgedetect + +> https://ffmpeg.org/ffmpeg-filters.html#edgedetect + +检测并绘制边缘。 过滤器使用 Canny Edge Detection 算法。 + +### 参数 + +- low +- high 设置 Canny 阈值算法使用的低和高阈值。高阈值选择 “ 强 ” 边缘像素,然后通过 8 连接将其与低阈值选择的 “ 弱 ” 边缘像素连接。高低阈值必须在 [0,1] 范围内选择,低应小于或等于高。低的默认值为 20/255,高的默认值为 50/255。 +- mode 定义绘图模式。 + - wires 在黑色背景上绘制白色/灰色线。默认。 + - colormix 混合颜色以创建绘画/卡通效果。 + - canny 在所有选定通道上应用 Canny 边缘检测器。 +- planes 设置通道。 + +### 示例 + +#### 标准边缘检测 + +```python +_ = 
input(src).edgedetect(low=0.1, high=0.4).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]edgedetect=high=0.4:low=0.1[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_edgedetect.mp4 -y -hide_banner +[1.0472s] +``` + +#### 对比 + +[视频对比链接] + +#### 绘画效果 + +```python +_ = input(src).edgedetect(mode="colormix", high=0).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]edgedetect=high=0:mode=colormix[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_edgedetect2.mp4 -y -hide_banner +[1.7482s] +``` + +#### 对比 + +[视频对比链接] + +## elbg + +> https://ffmpeg.org/ffmpeg-filters.html#elbg + +使用 ELBG(增强的 LBG)算法应用色调分离(posterize)效果。对于每个输入图像,滤镜将根据给定的码本长度(即不同输出颜色的数量)计算从输入到输出的最佳映射。 + +### 参数 + +- codebook_length, l 设置码本长度。该值必须是一个正整数,并且代表不同输出颜色的数量。默认值为 256。 +- nb_steps, n 设置计算最佳映射所用的最大迭代次数。值越高,结果越好,计算时间也越长。预设值为 1。 +- seed, s 设置随机种子,必须为 0 到 UINT32_MAX 之间的整数。如果未指定,或者显式设置为 -1,则过滤器将尽最大努力尝试使用良好的随机种子。 +- pal8 设置 pal8 输出像素格式。此选项不适用于码本长度大于 256 的情况。 + +### 示例 + +不懂,略。 + +## entropy + +> https://ffmpeg.org/ffmpeg-filters.html#entropy + +在视频帧的颜色通道的直方图中测量灰度级熵。 + +### 参数 + +- mode 可以是 normal / diff,默认是 normal。diff 模式测量直方图增量值(即相邻直方图值之间的绝对差)的熵。 + +### 示例 + +不懂,略。 + +## epx + +> https://ffmpeg.org/ffmpeg-filters.html#epx + +应用专为像素艺术设计的 EPX 放大滤镜。 + +### 参数 + +- n 设置缩放尺寸:2xEPX 为 2,3xEPX 为 3。默认值为 3。 + +### 示例 + +不懂,略。 + +## eq + +> https://ffmpeg.org/ffmpeg-filters.html#eq + +设置亮度,对比度,饱和度和近似伽玛调整。 + +### 参数 + +- contrast 设置对比度表达式。 该值必须是 -1000.0 到 1000.0 范围内的浮点值。默认值为 “1”。 +- brightness 设置亮度表达式。 该值必须是 -1.0 到 1.0 范围内的浮点值。默认值为 “0”。 +- saturation 设置饱和度表达式。该值必须是介于 0.0 到 3.0 之间的浮点数。默认值为 “1”。 +- gamma 设置伽玛表达式。 该值必须是介于 0.1 到 10.0 之间的浮点数。默认值为“1”。 +- gamma_r 设置红色通道的伽玛表达式。该值必须是介于 0.1 到 10.0 之间的浮点数。默认值为“1”。 +- gamma_g 设置绿色通道的伽玛表达式。该值必须是介于 0.1 到 10.0 之间的浮点数。默认值为“1”。 +- gamma_b 设置蓝色通道的伽玛表达式。该值必须是介于 0.1 到 10.0 之间的浮点数。默认值为“1”。 +- gamma_weight 设置伽玛权重表达式。它可用于减少高伽玛值对明亮图像区域的影响,防止它们被过度放大而变成纯白色。该值必须是 0.0 到 1.0 范围内的浮点数。值 0.0 会完全关闭伽玛校正,而值 1.0 会使其保持全部强度。默认值为“1”。 +- eval 设置何时计算亮度,对比度,饱和度和伽玛表达式。 + - init 在过滤器初始化期间或处理命令时仅对表达式求值一次,默认 + - frame 计算每个传入帧的表达式,表达式可以使用以下变量: + - n 输入帧的帧数从 0 开始 + - pos 输入文件中相应数据包的字节位置,如果未指定,则为 NAN + - r 输入视频的帧速率,如果输入帧速率未知,则为 NAN + - t 以秒为单位的时间戳,如果输入的时间戳未知,则为 NAN + +### 示例 + +暂略。 + +## erosion + +> https://ffmpeg.org/ffmpeg-filters.html#erosion + +对视频应用腐蚀效果。该滤镜将像素替换为 local ( 3x3 )最小值。 + +### 参数 + +- threshold0 +- threshold1 +- threshold2 +- threshold3 限制每个通道的最大变化,默认值为 65535。如果为 0,则通道将保持不变。 +- coordinates 指定参考哪些相邻像素的标志位。 默认值为 255,即全部八个相邻像素都参与。 + +### 示例 + +略,不懂。 + +## estdif + +> https://ffmpeg.org/ffmpeg-filters.html#estdif + +对输入视频进行反交错处理(“estdif”代表“边缘坡度追踪反交错过滤器”)。 + +这是一个纯空间域的过滤器,使用边缘斜率跟踪算法对缺失的行进行插值。 + +### 参数 + +- mode 设置反交错模式。 + - frame 每帧输出一帧。 + - field 每个场输出一帧。默认。 +- parity 假定输入隔行视频的场奇偶性。 + - tff 假设顶场在前。 + - bff 假设底场在前。 + - auto 启用场奇偶性的自动检测。默认。 +- deint 指定要反交错的帧。 + - all 反交错所有帧。 + - interlaced 仅反交错标记为隔行扫描的帧。 +- rslope 指定边缘坡度跟踪的搜索半径。 默认值为 1。允许的范围是 1 到 15。 +- redge 指定搜索半径以获得最佳边缘匹配。 默认值为 2。允许的范围是 0 到 15。 +- interp 指定使用的插值。 默认为 4 点插补。 + - 2p 2 点插补 + - 4p 4 点插补 + - 6p 6 点插补 + +### 示例 + +暂略。 + +## exposure + +> https://ffmpeg.org/ffmpeg-filters.html#exposure + +调整视频流的曝光。 + +### 参数 + +- exposure 以 EV 为单位设置曝光校正。允许的范围是 -3.0 到 3.0 EV。默认值为 0 EV。 +- black 设置黑电平校正。允许的范围是 -1.0 到 1.0。预设值为 0。 + +### 示例 + +No such filter: 'colorcorrect' + +官网英文文档中有 exposure 与 colorcorrect 的说明,但当前使用的构建中找不到它们。这两个滤镜都是较新版本才加入的,更可能是本地 FFmpeg 版本过旧或编译时未包含,而不是已被移除。
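+ +若所用 FFmpeg 构建包含 exposure 滤镜,按照本库其他滤镜的链式调用习惯,其用法大致如下。这只是一份示意性草稿:.exposure() 方法名与参数写法均按本库惯例推测,未经实际运行验证。 + +```python +# 示意:提升 0.5 EV 曝光并轻微下调黑电平(假设滤镜在当前 FFmpeg 构建中可用,未验证) +_ = input(src).exposure(exposure=0.5, black=-0.05).output(dst).run() +```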
+ +## extractplanes + +> https://ffmpeg.org/ffmpeg-filters.html#extractplanes + +从输入视频流中提取颜色通道分量到单独的灰度视频流中。 + +### 参数 + +- planes 指定提取通道。 + - ‘y’ + - ‘u’ + - ‘v’ + - ‘a’ + - ‘r’ + - ‘g’ + - ‘b’ + +选择输入中不存在的通道将导致错误,这也意味着无法同时选择 rgb 通道和 yuv 通道。 + +### 示例 + +```python +v0_extract = input(src).extractplanes(planes="y+u+v") +e1, e2, e3 = v0_extract[0], v0_extract[1], v0_extract[2] +_ = merge_outputs(e1.output(testdata_transform / "v0_extractplanes_y.mp4"), + e2.output(testdata_transform / "v0_extractplanes_u.mp4"), + e3.output(testdata_transform / "v0_extractplanes_v.mp4")).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]extractplanes=y+u+v[tag0][tag1][tag2]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_extractplanes_y.mp4 -vcodec h264_nvenc -map [tag1] C:\Users\Admin\Videos\transform\v0_extractplanes_u.mp4 -vcodec h264_nvenc -map [tag2] C:\Users\Admin\Videos\transform\v0_extractplanes_v.mp4 -y -hide_banner +[0.6920s] +``` + +#### 对比 + +[视频对比链接] + +## fade + +> https://ffmpeg.org/ffmpeg-filters.html#fade + +对输入视频应用淡入/淡出效果。 + +### 参数 + +- type, t 效果类型可以是“in”(淡入),也可以是“out”(淡出)。默认为 in。 +- start_frame, s 指定要开始应用淡入淡出效果的帧的编号。默认值为 0。 +- nb_frames, n 淡入淡出效果持续的帧数。在淡入效果结束时,输出视频将具有与输入视频相同的强度。在淡出过渡结束时,输出视频将使用选定的颜色填充。默认值为 25。 +- alpha 如果设置为 1,且输入中存在 alpha 通道,则仅对 alpha 通道应用淡入淡出。预设值为 0。 +- start_time, st 指定开始应用淡入淡出效果的时间戳(以秒为单位)。如果同时指定了 start_frame 和 start_time,则以两者中较晚的时间点为准。默认值为 0。 +- duration, d 淡入淡出效果持续的秒数。在淡入效果结束时,输出视频将具有与输入视频相同的强度,在淡出过渡结束时,输出视频将被选定的颜色填充。如果同时指定了 duration 和 nb_frames,则使用 duration。默认值为 0(默认使用 nb_frames)。 +- color, c 指定淡入淡出的颜色。默认值为“黑色”。 + +### 示例 + +#### 最初 30 帧淡入 + +```python +_ = input(src).fade("in", 0, 30).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fade=in:0:30[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fade1.mp4 -y -hide_banner +[0.3845s] +``` + +#### 对比 + +[视频对比链接] + +#### 最后 45 帧淡出 + +```python +_ = input(src).fade("out", 100, 45).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fade=out:100:45[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fade2.mp4 -y -hide_banner +[0.4127s] +``` + +#### 对比 + +[视频对比链接] + +#### 同时添加淡入淡出 + +```python +_ = input(src).fade("in", 0, 25).fade("out", 75, 25).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fade=in:0:25[tag0];[tag0]fade=out:75:25[tag1]" -vcodec h264_nvenc -map [tag1] C:\Users\Admin\Videos\transform\v0_fade3.mp4 -y -hide_banner +[0.3532s] +``` + +#### 对比 + +[视频对比链接] + +#### 指定淡入颜色 + +```python +_ = input(src).fade("in", 5, 20, color="yellow").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fade=in:5:20:color=yellow[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fade4.mp4 -y -hide_banner +[0.5072s] +``` + +#### 对比 + +[视频对比链接] + +#### 设置透明度 + +```python +_ = input(src).fade("in", 0, 25, alpha=1).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fade=in:0:25:alpha=1[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fade5.mp4 -y -hide_banner +[0.4154s] +``` + +#### 对比 + +[视频对比链接] + +#### 按秒设置淡入 + +```python +_ = input(src).fade(t="in", start_time=1, duration=2).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i 
testdata\media\0.mp4 -filter_complex "[0]fade=duration=2:start_time=1:t=in[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fade6.mp4 -y -hide_banner +[0.3846s] +``` + +#### 对比 + +[视频对比链接] + +## fftdnoiz + +> https://ffmpeg.org/ffmpeg-filters.html#fftdnoiz + +使用 3D FFT(频域滤波)对帧进行消噪。 + +### 参数 + +- sigma 设置噪声 sigma 常数。这设置了去噪强度。默认值是 1。允许的范围是 0 到 30。使用非常高的 sigma 和较低的重叠可能会产生阻塞伪像。 +- amount 设置去噪量。默认情况下,所有检测到的噪声都会降低。默认值为 1。允许的范围是 0 到 1。 +- block 设置块的大小,默认为 4,可以是 3、4、5 或 6。以像素为单位的块的实际大小为块的幂的 2,因此默认情况下,以像素为单位的块大小为 2 ^ 4,即 16。 +- overlap 设置块重叠。默认值为 0.5。允许范围是 0.2 到 0.8。 +- prev 设置用于降噪的先前帧数。默认情况下设置为 0。 +- next 设置要用于降噪的下一帧数。默认情况下设置为 0。 +- planes 默认情况下,将被过滤的设置通道是所有可用的过滤(除 alpha 之外)。 + +### 示例 + +暂略。 + +## fftfilt + +> https://ffmpeg.org/ffmpeg-filters.html#fftfilt + +将任意表达式应用于频域中的样本。 + +### 参数 + +- dc_Y 调整图像亮度通道的直流值(增益)。过滤器接受范围为 0 到 1000 的整数值。默认值设置为 0。 +- dc_U 调整图像第一色度通道的直流值(增益)。过滤器接受范围为 0 到 1000 的整数值。默认值设置为 0。 +- dc_V 调整图像第二色度通道的 dc 值(增益)。过滤器接受范围为 0 到 1000 的整数值。默认值设置为 0。 +- weight_Y 设置亮度通道的频域权重​​表达式。 +- weight_U 设置第一个色度通道的频域权重​​表达式。 +- weight_V 设置第二色度通道的频域权重​​表达式。 +- eval 在计算表达式时设置。 + - init 在过滤器初始化期间仅对表达式求值一次。默认。 + - frame 计算每个传入帧的表达式。 +- X +- Y 当前样本的坐标。 +- W +- H 图像的宽度和高度。 +- N 输入帧数,从 0 开始。 + +### 示例 + +```python +_ = input(src).fftfilt(dc_Y=0, weight_Y='squish((Y+X)/100-1)').output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fftfilt=dc_Y=0:weight_Y=squish((Y+X)/100-1)[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fftfilt4.mp4 -y -hide_banner +[1.8354s] +``` + +#### 对比 + +[视频对比链接] + +## field + +> https://ffmpeg.org/ffmpeg-filters.html#field + +使用跨步算法从隔行扫描图像中提取单个字段,以避免浪费CPU时间。 输出帧被标记为非隔行扫描。 + +### 参数 + +- type 指定是提取顶部(如果值为 0 或 top)还是底部字段(如果值为 1 或 bottom)。 + +### 示例 + +暂略。 + +## fieldhint + +> https://ffmpeg.org/ffmpeg-filters.html#fieldhint + +通过从 hint 文件以数字形式提供的周围帧中复制顶部和底部字段来创建新帧。 + +### 参数 + +- hint 设置包含提示的文件:绝对 / 相对帧号。剪辑中的每一帧必须有一行。每行必须包含两个数字,并用逗号分隔,并可以选择后面跟 - 或 +。文件每一行上提供的数字不能超出 [N-1,N+1],其中对于绝对模式,N 是当前帧号;对于相对模式,其数字不能超出 [-1,1] 范围。第一个数字告诉从哪个帧拾取顶部场,第二个数字告诉从哪个帧拾取底部场。如果可选地在其后跟 + 输出帧,则将其标记为隔行扫描;否则,如果其后跟 - 输出帧,则将其标记为逐行扫描,否则将与输入帧标记为相同。如果可选地后跟 t,则输出帧将仅使用顶场,或者在 b 的情况下,将仅使用底场。如果行以#或;开头 该行被跳过。 +- mode absolute/relative + +relative 模式 hint 文件示例 + +``` +0,0 - # first frame +1,0 - # second frame, use third's frame top field and second's frame bottom field +1,0 - # third frame, use fourth's frame top field and third's frame bottom field +1,0 - +0,0 - +0,0 - +1,0 - +1,0 - +1,0 - +0,0 - +0,0 - +1,0 - +1,0 - +1,0 - +0,0 - +``` + +### 示例 + +不懂,略。 + +## fieldmatch + +> https://ffmpeg.org/ffmpeg-filters.html#fieldmatch + +反向电视电影的场匹配滤波器。它旨在从电视广播流中重建渐进帧。该滤波器不会丢失重复的帧,因此要实现完整的反电视电影 `fieldmatch`,需要紧随其后的是抽取滤波器,例如滤波器图中的抽取。 + +场匹配和抽取的分离尤其是由在两者之间插入去隔行滤波器回退的可能性引起的。如果源混合了电视转播和真实的隔行扫描内容,则 `fieldmatch` 将无法匹配隔行扫描部分的字段。但是这些剩余的精梳帧将被标记为隔行扫描,因此可以在抽取前由更高版本的过滤器(例如 yadif)进行去隔行处理。 + +除了各种配置选项之外,`fieldmatch` 还可以通过 ppsrc 选项激活第二个可选流。如果启用,则帧重构将基于第二个流中的字段和帧。这样可以对第一个输入进行预处理,以帮助滤波器的各种算法,同时保持输出无损(假设字段正确匹配)。通常,现场感知的降噪器或亮度 / 对比度调整会有所帮助。 + +请注意,此过滤器使用与 TIVTC / TFM(AviSynth 项目)和 VIVTC / VFM(VapourSynth 项目)相同的算法。后者是 TFM 的轻型克隆,场匹配基于该克隆。尽管语义和用法非常接近,但某些行为和选项名称可能有所不同。 + +目前,抽取滤波器仅适用于恒定帧频输入。如果您的输入混合了电视转播(30fps)和渐进式内容,且帧率较低,例如 24fps,请使用以下过滤器链产生必要的 cfr 流:`dejudder,fps=30000/1001,fieldmatch,decimate`。 + +### 参数 + +不懂,略。 + +## fieldorder + +> https://ffmpeg.org/ffmpeg-filters.html#fieldorder + +转换输入视频的场序。 + +### 参数 + +- order 输出字段顺序。 有效值是 tff(对于顶部字段优先,默认)或 bff(对于底部字段优先)。 + +通过将图片内容上移或下移一行,然后用适当的图片内容填充剩余的行来完成转换。此方法与大多数广播现场顺序转换器一致。 + 
+如果未将输入视频标记为隔行扫描,或者已将其标记为具有所需的输出场顺序,则此过滤器不会更改传入的视频。 + +在转换为 PAL DV 材料或从中转换为 PAL DV 材料时,此功能非常有用,这是最先考虑的。 + +### 示例 + +不懂,略。 + +## fifo / afifo + +> https://ffmpeg.org/ffmpeg-filters.html#fifo_002c-afifo + +缓冲输入图像并在需要时发送它们。当由 libavfilter 帧自动插入时,十分有用。不带参数。 + +## fillborders + +> https://ffmpeg.org/ffmpeg-filters.html#fillborders + +填充输入视频的边框,而无需更改视频流尺寸。 有时,视频的四个边缘可能有垃圾,您可能不希望裁剪视频输入以使大小保持为某个数字的倍数。 + +### 参数 + +- left 从左边框填充的像素数。 +- right 从右边框填充的像素数。 +- top 从顶部边框填充的像素数。 +- bottom 从底部边框填充的像素数。 +- mode 设置填充模式。 + - smear 使用最外面的像素填充像素,默认 + - mirror 使用镜像填充像素(半采样对称) + - fixed 用恒定值填充像素 + - reflect 使用反射填充像素(整个样本对称) + - wrap 使用包装填充像素 + - fade 淡入像素至恒定值 +- color 在固定或淡入淡出模式下为像素设置颜色。 默认为黑色。 + +### 示例 + +```python +_ = input(src).fillborders(left=50, right=50, top=50, bottom=50, mode="mirror").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fillborders=bottom=50:left=50:mode=mirror:right=50:top=50[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_fillborders.mp4 -y -hide_banner +[0.4578s] +``` + +#### 对比 + +[视频对比链接] + +## find_rect + +> https://ffmpeg.org/ffmpeg-filters.html#find_rect + +查找矩形对象。 + +### 参数 + +- object 对象图像的文件路径,必须为 gray8。 +- threshold 检测阈值,默认为 0.5。 +- mipmaps mipmap 的数量,默认为 3。 +- xmin, ymin, xmax, ymax 指定要搜索的矩形。 + +### 示例 + +缺素材,暂略。 + +## floodfill + +> https://ffmpeg.org/ffmpeg-filters.html#floodfill + +用指定值填充具有相同像素值的区域。 + +Flood fill 算法是从一个区域中提取若干个连通的点与其他相邻区域区分开(或分别染成不同颜色)的经典算法。因为其思路类似洪水从一个区域扩散到所有能到达的区域而得名。 + +### 参数 + +- x 设置像素 x 坐标。 +- y +- s0 设置源 #0 组件值。 +- s1 +- s2 +- s3 +- d0 设置目标 #0 组件值。 +- d1 +- d2 +- d3 + +### 示例 + +```python +_ = input(src).geq( + r="if(lte(r(X,Y),48),0,r(X,Y))", + g="if(lte(g(X,Y),48),0,g(X,Y))", + b="if(lte(b(X,Y),48),0,b(X,Y))", +).floodfill(10, 10, 0, 40, 0, 0, 84, 0).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]format=rgb24,geq="r=if(lte(r(X,Y),48),0,r(X,Y)):g=if(lte(g(X,Y),48),0,g(X,Y)):b=if(lte(b(X,Y),48),0,b(X,Y))",floodfill=10:40:0:0:0:255:0:0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_floodfill.mp4 -y -hide_banner +``` + +#### 对比 + +[视频对比链接] + +## format + +> https://ffmpeg.org/ffmpeg-filters.html#format + +将输入视频转换为指定的像素格式之一。 Libavfilter 会尝试选择一个适合作为下一个过滤器输入的对象。 + +### 参数 + +- pix_fmts 以“|”分隔的像素格式名称列表,表示或,例如“pix_fmts=yuv420p|monow|rgb24”。 + +### 示例 + +```python +_ = input(src).format("yuv420p", "yuv444p", "yuv410p").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]format=pix_fmts=yuv420p|yuv444p|yuv410p[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_format.mp4 -y -hide_banner +[0.4748s] +``` + +#### 对比 + +[视频对比链接] + +## fps + +> https://ffmpeg.org/ffmpeg-filters.html#fps + +通过根据需要复制或删除帧,将视频转换为指定的恒定帧速率。 + +### 参数 + +- fps 所需的输出帧速率。默认值为 25。 +- start_time 假设第一个 PTS 应该是给定的值,以秒为单位。这允许在流的开头进行填充 / 修剪。默认情况下,不对第一帧的预期 PTS 做任何假设,因此不进行填充或修整。例如,可以将其设置为 0,以在视频流在音频流之后开始时以第一帧的副本填充开头,或者以负 PTS 修剪任何帧。 +- round 时间戳(PTS)舍入方法。 + - zero 四舍五入 + - inf 从 0 舍入 + - down 向 -infinity 舍入 + - up 向 +infinity 舍入 + - near 四舍五入到最接近的 +- eof_action 读取最后一帧时执行的操作。 +- round 使用与其他帧相同的时间戳取整方法。 +- pass 如果尚未达到输入持续时间,则通过最后一帧。 + +或者,可以将选项指定为扁平字符串:`fps[:start_time[:round]]`。 + +### 示例 + +```python +_ = input(src).fps(fps=60).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]fps=fps=60[tag0]" -vcodec h264_nvenc -map [tag0] 
C:\Users\Admin\Videos\transform\v0_fps.mp4 -y -hide_banner +[0.4259s] +``` + +#### 对比 + +[视频对比链接] + +## framepack + +> https://ffmpeg.org/ffmpeg-filters.html#framepack + +将两个不同的视频流打包到一个立体视频中,在支持的编解码器上设置适当的元数据。 这两个视图应具有相同的大小和帧速率,并且在较短的视频结束时将停止处理。 请注意,您可以使用 scale 和 fps 过滤器方便地调整视图属性。 + +### 参数 + +- format 所需的包装格式。 支持的值为: + - sbs 视图彼此相邻(默认)。 + - tab 这些视图是彼此叠置的。 + - lines 视图按行排列。 + - columns 视图按列打包。 + - frameseq 视图在时间上是交错的。 + +### 示例 + +#### 将左右视图转换为帧序视频 + +```python +_ = input(src).framepack(input(src), format="frameseq").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0][1]framepack=format=frameseq[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_framepack5.mp4 -y -hide_banner +[0.4380s] +``` + +#### 对比 + +[视频对比链接] + +#### 将视图转换为与输入具有相同输出分辨率的并排视频 + +```python +_ = input(src).framepack(input(src), format="sbs").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0][1]framepack=format=sbs[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_framepack1.mp4 -y -hide_banner +[0.4808s] +``` + +#### 对比 + +[视频对比链接] + +## framerate + +> https://ffmpeg.org/ffmpeg-filters.html#framerate + +通过从源帧中插入新的视频输出帧来更改帧速率。 + +此过滤器不能与隔行扫描媒体一起正常使用。 如果要更改隔行扫描媒体的帧速率,则需要在此滤镜之前进行反隔行扫描,并在此滤镜之后进行重新隔行扫描。 + +### 参数 + +- fps 指定每秒的输出帧数。也可以将此选项单独指定为一个值。默认值为 50。 +- interp_start 指定一个范围的起点,在该范围内将创建输出帧为两个帧的线性插值。范围是 [0-255],默认值为 15。 +- interp_end 指定将要创建输出帧的范围的终点,以两个帧的线性插值。范围是 [0-255],默认值是 240。 +- scene 将检测到场景变化的级别指定为 0 到 100 之间的值,以指示新场景; 较低的值表示当前帧引入新场景的可能性较低,而较高的值表示当前帧很可能是一个新场景。默认值为 8.2。 +- flags 指定影响过滤过程的标志。 + - scene_change_detect, scd 使用选项场景的值启用场景更改检测。 默认情况下启用此标志。 + +### 示例 + +```python +_ = input(src).framerate(fps=60).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]framerate=fps=60[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_framerate.mp4 -y -hide_banner +[0.4547s] +``` + +#### 对比 + +[视频对比链接] + +## framestep + +> https://ffmpeg.org/ffmpeg-filters.html#framestep + +每第 N 帧选择一帧。 + +### 参数 + +- step 在每个步骤帧之后选择帧。 允许值为大于 0 的正整数。默认值为 1。 + +### 示例 + +```python +_ = input(src).framestep(step=5).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]framestep=step=5[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_framestep.mp4 -y -hide_banner +[0.3699s] +``` + +#### 对比 + +[视频对比链接] + +## freezedetect + +> https://ffmpeg.org/ffmpeg-filters.html#freezedetect + +检测静止的视频。 + +当此筛选器检测到输入视频在指定持续时间内内容没有明显变化时,会记录一条消息并设置帧元数据。视频静止检测可计算视频帧所有分量的平均平均绝对差,并将其与本底噪声进行比较。 + +打印时间和持续时间以秒为单位。lavfi.freezedetect.freeze_start 元数据密钥在时间戳等于或超过检测持续时间的第一帧上设置,并且包含冻结的第一帧的时间戳。lavfi.freezedetect.freeze_duration 和 lavfi.freezedetect.freeze_end 元数据密钥在冻结后的第一帧上设置。 + +### 参数 + +- noise, n 设置噪声容限。可以以 dB 指定(如果在指定值后附加“dB”)或 0 到 1 之间的差异比率。默认值为 -60dB 或 0.001。 +- duration, d 设置冻结持续时间,直到通知为止(默认为 2 秒)。 + +### 示例 + +暂略。 + +## freezeframes + +> https://ffmpeg.org/ffmpeg-filters.html#freezeframes + +冻结视频帧。 + +该过滤器使用来自第二个输入的帧冻结视频帧。 + +### 参数 + +- first 设置从其开始冻结的第一帧的编号。 +- last 设置结束冻结的最后一帧的编号。 +- replace 设置来自第二个输入的帧数,它将代替替换的帧。 + +### 示例 + +暂略。 + +## frei0r + +> https://ffmpeg.org/ffmpeg-filters.html#frei0r + +对输入视频应用 frei0r 效果。 + +### 参数 + +- filter_name 要加载的 frei0r 效果的名称。如果定义了环境变量 FREI0R_PATH,则会在 FREI0R_PATH 中用冒号分隔的列表指定的每个目录中搜索 
frei0r 效果。否则,将按以下顺序搜索标准 frei0r 路径:HOME/.frei0r-1/lib/,/usr/local/lib/frei0r-1/,/usr/lib/frei0r-1/。 +- filter_params 一个用“|”分隔的参数列表,以传递给 frei0r 效果。 + +frei0r 效果参数可以是布尔值(其值为“y”或“n”)、双精度数、颜色(指定为 R/G/B,其中 R、G 和 B 是介于 0.0 和 1.0 之间的浮点数,也可以是手册“Color”一节中规定的颜色描述)、位置(指定为 X/Y,其中 X 和 Y 为浮点数)和 / 或字符串。 + +参数的数量和类型取决于加载的效果。如果未指定效果参数,则使用默认值。 + +### 示例 + +No such filter: 'frei0r' + +## fspp + +> https://ffmpeg.org/ffmpeg-filters.html#fspp + +应用快速简单的后处理。它是 spp 的更快版本。 + +它将(I)DCT 拆分为水平 / 垂直两趟处理。与简单的后处理滤镜不同,其中一趟以块为单位执行一次,而不是以像素为单位执行,因此速度更快。 + +### 参数 + +- quality 设置质量。此选项定义了平均级别数。它接受 4-5 范围内的整数。预设值为 4。 +- qp 强制使用恒定的量化参数。可接受范围为 0-63 的整数。如果未设置,则过滤器将使用视频流中的 QP(如果有)。 +- strength 设置过滤器强度。它接受介于 -15 到 32 之间的整数。较低的值表示更多的细节,但也有更多的伪影,而较高的值表示图像更平滑但也更模糊。默认值为 0(最佳 PSNR)。 +- use_bframe_qp 如果设置为 1,则启用 B 帧中的 QP。由于 B 帧通常具有较大的 QP,因此使用此选项可能会导致闪烁。默认值为 0(未启用)。 + +### 示例 + +略 + +## gblur + +> https://ffmpeg.org/ffmpeg-filters.html#gblur + +应用高斯模糊滤镜。 + +### 参数 + +- sigma 设置水平 sigma,高斯模糊的标准偏差。默认值为 0.5。 +- steps 设置高斯近似的步数。默认值为 1。 +- planes 设置要过滤的通道。默认情况下,所有通道均被过滤。 +- sigmaV 设置垂直 sigma,如果为负,则与 `sigma` 相同。默认值为 -1。 + +### 示例 + +```python +_ = input(src).gblur(sigma=0.45).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]gblur=sigma=0.45[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_gblur.mp4 -y -hide_banner +[0.4855s] +``` + +#### 对比 + +[视频对比链接] + +## geq + +> https://ffmpeg.org/ffmpeg-filters.html#geq + +将通用方程式应用于每个像素。 + +### 参数 + +- lum_expr, lum 设置亮度表达式。 +- cb_expr, cb 设置色度蓝色表达式。 +- cr_expr, cr 设置色度红色表达式。 +- alpha_expr, a 设置 alpha 表达式。 +- red_expr, r 设置红色表达式。 +- green_expr, g 设置绿色表达式。 +- blue_expr, b 设置蓝色表达式。 + +颜色空间是根据指定的选项选择的。如果指定了 lum_expr,cb_expr 或 cr_expr 选项之一,则过滤器将自动选择 YCbCr 颜色空间。如果指定了 red_expr,green_expr 或 blue_expr 选项之一,它将选择 RGB 色彩空间。 + +如果其中一个色度表达式未定义,则会回退使用另一个色度表达式。如果未指定 alpha 表达式,则按完全不透明处理。如果未指定任何色度表达式,则沿用亮度表达式。 + +表达式可用变量: + +- N 过滤后的帧的序号,从 0 开始。 +- X +- Y 当前样本的坐标。 +- W +- H 图像的宽度和高度。 +- SW +- SH 宽度和高度比例取决于当前过滤的通道。它是相应的亮度通道像素数与当前通道像素数之比。例如,对于 YUV 4:2:0,亮度通道的值是 1,1,色度通道的值是 0.5,0.5。 +- T 当前帧的时间,以秒为单位。 +- p(x,y) 返回当前通道位置 (x,y) 的像素值。 +- lum(x,y) 返回亮度通道位置 (x,y) 的像素值。 +- cb(x,y) 返回位于蓝差色度通道位置 (x,y) 的像素值。如果没有这样的通道则返回 0。 +- cr(x,y) 返回红差色度通道位置 (x,y) 的像素值。如果没有这样的通道则返回 0。 +- r(x,y) +- g(x,y) +- b(x,y) 返回红 / 绿 / 蓝组件位置 (x,y) 的像素值。如果没有这样的组件,则返回 0。 +- alpha(x,y) 返回 alpha 通道位置 (x,y) 的像素值。如果没有这样的通道则返回 0。 +- psum(x,y) +- lumsum(x,y) +- cbsum(x,y) +- crsum(x,y) +- rsum(x,y) +- gsum(x,y) +- bsum(x,y) +- alphasum(x,y) 从 (0,0) 到 (x,y) 的矩形区域内样本值的累计和,可以据此快速求得任意矩形内样本值之和。各函数的含义参见对应的不带 sum 后缀的函数。 +- interpolation 设置一种插值方法: nearest/bilinear。默认 bilinear。 + +对于函数,如果 x 和 y 在该区域之外,则值将自动裁剪到较近的边缘。 + +请注意,此过滤器可以使用多个线程,在这种情况下,每个切片将具有其自己的表达式状态。如果由于表达式取决于以前的状态而只想使用一个表达式状态,则应将过滤器线程数限制为 1。 + +### 示例 + +#### 水平翻转 + +```python +_ = input(src).geq("p(W-X,Y)").output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=p(W-X\,Y)[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq1.mp4 -y -hide_banner +[1.1102s] +``` + +#### 对比 + +[视频对比链接] + +#### 产生二维正弦波 + +```python +_ = input(src).geq("128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y))", 128, 128).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq2.mp4 -y -hide_banner +[2.3361s] +``` + +#### 对比 + +[视频对比链接] + +#### 产生一个奇特的神秘移动光 + +```python +_ = input(src).geq("random(1)/hypot(X-cos(N*0.07)*W/2-W/2,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02)", 128, 128).output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq3.mp4 -y -hide_banner +[4.8099s] +``` + +#### 对比 + +[视频对比链接] + +#### 生成一个快速浮雕效果 + +```python +_ = input(src).geq(lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=lum_expr=(p(X\,Y)+(256-p(X-4\,Y-4)))/2[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq4.mp4 -y -hide_banner +[3.4013s] +``` + +#### 对比 + +[视频对比链接] + +#### 根据像素位置修改 RGB 分量 + +```python +_ = input(src).geq(r='X/W*r(X,Y)', g='(1-X/W)*g(X,Y)', b='(H-Y)/H*b(X,Y)').output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=b=(H-Y)/H*b(X\,Y):g=(1-X/W)*g(X\,Y):r=X/W*r(X\,Y)[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq5.mp4 -y -hide_banner +[2.4847s] +``` + +#### 对比 + +[视频对比链接] + +#### 创建与输入大小相同的径向渐变 + +```python +_ = input(src).geq(lum="255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0)").output(dst).run() +``` + +``` +ffmpeg -i testdata\media\0.mp4 -filter_complex "[0]geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0)[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_geq6.mp4 -y -hide_banner +[3.1773s] +``` + +#### 对比 + +[视频对比链接] + +## gradfun + +> https://ffmpeg.org/ffmpeg-filters.html#gradfun + +修复近乎平坦区域因色深被截断到 8 位而引入的条带伪影:在条带所在位置插值出本应存在的渐变,并对其进行抖动。 + +它仅设计用于播放。请勿在有损压缩之前使用它,因为压缩会丢失抖动并使条带重新出现。 + +### 参数 + +- strength 滤镜将改变任何一个像素的最大数量。这也是检测几乎平坦区域的阈值。可接受的范围是 0.51 到 64; 默认值为 1.2。超出范围的值将被裁剪为有效范围。 +- radius 用于拟合渐变的邻域大小。较大的半径可实现更平滑的渐变,但同时也会阻止滤镜修改细部区域附近的像素。可接受的值是 8-32 ; 默认值为 16。超出范围的值将被裁剪为有效范围。 + +### 示例 + +```python +_ = input(src).gradfun(3.5, 8).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]gradfun=3.5:8[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_gradfun.mp4 -y -hide_banner +[0.4002s] +``` + +#### 对比 + +[视频对比链接] + +## graphmonitor + +> https://ffmpeg.org/ffmpeg-filters.html#graphmonitor + +显示各种过滤器统计信息。 + +使用此过滤器,可以调试完整的过滤器图,尤其是排队帧填满链路的问题。 + +### 参数 + +- size, s 设置视频输出大小。默认值为 hd720。 +- opacity, o 设置视频不透明度。默认值为 0.9。允许范围是 0 到 1。 +- mode, m 设置输出模式,可以是 full 或 compact。在 compact 模式下,只有带有一些排队帧的过滤器才会显示统计信息。 +- flags, f 设置标志,以启用在视频中显示哪些统计信息。 + - queue 显示每个链路中排队的帧数。 + - frame_count_in 显示送入滤镜的帧数。 + - frame_count_out 显示滤镜输出的帧数。 + - pts 显示当前过滤帧的 pts。 + - time 显示当前过滤帧的时间。 + - timebase 显示过滤器链路的时基。 + - format 显示过滤器链路使用的格式。 + - size 显示视频尺寸;若链路承载音频,则显示音频通道数。 + - rate 显示视频帧率;若链路承载音频,则显示采样率。 + - eof 显示链路输出状态。 +- rate, r 设置输出流视频速率的上限,默认值为 25。这样可以保证输出视频帧速率不超过该值。 + +### 示例 + +略。 + +## greyedge + +> https://ffmpeg.org/ffmpeg-filters.html#greyedge + +颜色恒常性(color constancy)滤波器,通过灰度边缘(grey edge)算法估计场景光照并相应地校正场景颜色。 + +### 参数 + +- difford 应用于场景的求导阶数。必须在 [0,2] 范围内选择,默认值为 1。 +- minknorm 用于计算 Minkowski 距离的 Minkowski 参数。必须在 [0,20] 范围内选择,默认值为 1。设置为 0 时取最大值,而不是计算 Minkowski 距离。 +- sigma 应用于场景的高斯模糊标准差。必须在 [0,1024.0] 范围内选择,默认值为 1。如果 difford 大于 0,则 floor(sigma * break_off_sigma(3)) 不能等于 0。 + +### 示例 + +```python +_ = input(src).greyedge(1, 5, 2).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]greyedge=1:5:2[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_greyedge.mp4 -y -hide_banner +[3.3097s] +``` + +#### 对比 + +[视频对比链接] + +## haldclut + +> https://ffmpeg.org/ffmpeg-filters.html#haldclut + +将 Hald CLUT 应用于视频流。 + 
+第一个输入是要处理的视频流,第二个输入是 Hald CLUT。Hald CLUT 输入可以是简单的图片或完整的视频流。 + +### 参数 + +- shortest 最短输入终止时强制终止。 默认值为0。 +- repeatlast 流结束后,继续应用最后一个 CLUT。值为 0 时,将在到达 CLUT 的最后一帧后禁用过滤器。默认值为 1。 + +### 示例 + +不懂,略。 + +## hflip + +> https://ffmpeg.org/ffmpeg-filters.html#hflip + +水平翻转输入视频。 + +### 参数 + +无。 + +### 示例 + +```python +_ = input(src).hflip().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hflip[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hflip.mp4 -y -hide_banner +[0.3542s] +``` + +#### 对比 + +[视频对比链接] + +## histeq + +> https://ffmpeg.org/ffmpeg-filters.html#histeq + +该过滤器在每帧的基础上应用全局颜色直方图均衡。 + +它可用于校正像素强度压缩范围内的视频。 滤镜会重新分配像素强度,以使它们在强度范围内的分布相等。 可以将其视为“自动调整对比度滤镜”。 此过滤器仅在校正降级或捕获不良的源视频时有用。 + +### 参数 + +- strength 确定要应用的均衡量。随着强度降低,像素强度的分布越来越接近输入帧的分布。该值必须是 [0,1] 范围内的浮点数,默认为 0.200。 +- intensity 设置可以产生的最大强度,并适当缩放输出值。强度应根据需要设置,然后可以根据需要限制强度,以免冲洗。该值必须是 [0,1] 范围内的浮点数,默认为 0.210。 +- antibanding 设置防束缚水平。如果启用,滤镜将随机少量改变输出像素的亮度,以避免直方图出现条纹。可能的值是 none,weak 或 strong。默认为无。 + +### 示例 + +略。 + +## histogram + +> https://ffmpeg.org/ffmpeg-filters.html#histogram + +计算并绘制输入视频的颜色分布直方图。 + +所计算的直方图是图像中颜色分量分布的表示。 + +标准直方图显示图像中的颜色成分分布。显示每个颜色成分的颜色图。根据输入格式显示当前帧中 Y,U,V,A 或 R,G,B 分量的分布。在每个图的下方显示了一个颜色分量刻度表。 + +### 参数 + +- level_height +- scale_height +- display_mode +- levels_mode +- components +- fgopacity +- bgopacity + +### 示例 + +```python +_ = input(src).histogram(level_height=250).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]histogram=level_height=250[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_histogram.mp4 -y -hide_banner +[0.4159s] +``` + +#### 对比 + +[视频对比链接] + +## hqdn3d + +> https://ffmpeg.org/ffmpeg-filters.html#hqdn3d + +这是一个高精度 / 高质量的 3D 降噪滤波器。它的目的是减少图像噪点,产生平滑图像,并使静止图像真正静止。它应该增强可压缩性。 + +### 参数 + +- luma_spatial 一个非负浮点数,用于指定空间亮度强度。默认为 4.0。 +- chroma_spatial 一个非负浮点数,用于指定空间色度强度。默认为 3.0 * luma_spatial / 4.0。 +- luma_tmp 一个浮点数,它指定亮度时间强度。默认为 6.0 * luma_spatial / 4.0。 +- chroma_tmp 一个浮点数,指定色度时间强度。默认为 luma_tmp * chroma_spatial / luma_spatial。 + +### 示例 + +略。 + +## hwdownload + +> https://ffmpeg.org/ffmpeg-filters.html#hwdownload + +将硬件帧下载到系统内存中。 + +输入必须为硬件帧,输出必须为非硬件格式。 并非所有格式都支持输出-可能需要在图形后立即插入一个附加格式过滤器,以获取受支持格式的输出。 + +### 参数 + +无。 + +### 示例 + +略。 + +## hwmap + +> https://ffmpeg.org/ffmpeg-filters.html#hwmap + +将硬件帧映射到系统内存或另一个设备。 + +该过滤器具有几种不同的操作模式。 使用哪种格式取决于输入和输出格式: + +- 硬件帧输入,普通帧输出:将输入帧映射到系统内存,然后将其传递到输出。如果以后需要原始硬件帧(例如,在其上覆盖了其他内容),则可以在下一个模式下再次使用 hwmap 过滤器来检索它。 +- 普通帧输入,硬件帧输出:如果输入实际上是一个软件映射的硬件帧,则取消映射 - 即返回原始硬件帧。否则,必须提供设备。在该设备上为输出创建新的硬件表面,然后将它们映射回输入端的软件格式,并将这些帧提供给前面的过滤器。然后,这将类似于 hwupload 过滤器,但是当输入已经采用兼容格式时,它可能能够避免其他副本。 +- 硬件帧输入和输出:必须直接或使用 derive_device 选项为输出提供设备。输入和输出设备必须具有不同的类型并且兼容 - 确切的含义取决于系统,但是通常这意味着它们必须引用相同的基础硬件上下文(例如,引用相同的图形卡)。如果输入帧最初是在输出设备上创建的,则取消映射以检索原始帧。否则,将帧映射到输出设备 - 在输出上创建与输入中的帧相对应的新硬件帧。 + +### 参数 + +- mode 设置帧映射模式。可以是以下模式的组合(默认 read+write): + - read 映射的帧应该是可读的。 + - write 映射的帧应该是可写的。 + - overwrite 映射将始终覆盖整个帧。在某些情况下,这可能会提高性能,因为不需要加载帧的原始内容。 + - direct 映射不包含任何复制。在某些情况下,可能无法创建到帧副本的间接映射,或者无法进行直接映射,否则将具有意外的属性。 设置此标志可确保映射是直接的,并且如果不可能的话将失败。 +- derive_device type 与其使用初始化时提供的设备,不如从输入帧所在的设备中派生类型类型的新设备。 +- reverse 在硬件到硬件的映射中,反向映射-在接收器中创建帧并将其映射回源。 在某些情况下,如果需要在一个方向上进行映射,但是所使用的设备仅支持相反的方向,则这可能是必需的。此选项很危险-如果该过滤器的输出有任何其他限制,它可能会以不确定的方式破坏前面的过滤器。 在不完全了解其使用含义的情况下,请勿使用它。 + +### 示例 + +略。 + +## hwupload + +> https://ffmpeg.org/ffmpeg-filters.html#hwupload + +将系统内存帧上传到硬件上。 + +过滤器初始化时必须提供要上传的设备。如果使用 ffmpeg,请使用 -filter_hw_device 选项或 validate_device 
选项选择适当的设备。输入和输出设备必须具有不同的类型并且兼容 - 确切的含义取决于系统,但是通常这意味着它们必须引用相同的基础硬件上下文(例如,引用相同的图形卡)。 + +### 参数 + +- derive_device type 与其使用初始化时提供的设备,不如从输入帧所在的设备中派生类型类型的新设备。 + +### 示例 + +略。 + +## hwupload_cuda + +> https://ffmpeg.org/ffmpeg-filters.html#hwupload_cuda + +将系统内存帧上传到 CUDA 设备。 + +### 参数 + +- device 要使用的 CUDA 设备的编号 + + +### 示例 + +```python +_ = input(src).hwupload_cuda(0).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hwupload_cuda=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hwupload_cuda.mp4 -y -hide_banner +[0.5929s] +``` + +#### 对比 + +命令是成功的,但不知道有什么用。 + +## hqx + +> https://ffmpeg.org/ffmpeg-filters.html#hqx + +应用专为像素艺术设计的高质量放大滤镜。 + +### 参数 + +- n 设置缩放比例:hq2x 为 2,hq3x 为 3,hq4x 为 4。默认值为 3。 + +### 示例 + +```python +_ = input(src).hqx().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hqx[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hqx.mp4 -y -hide_banner +[1.6528s] +``` + +#### 对比 + +肉眼看不出明显区别。 + +## hstack + +> https://ffmpeg.org/ffmpeg-filters.html#hstack + +水平堆叠输入视频。 + +所有流必须具有相同的像素格式和相同的高度。 + +请注意,此过滤器比使用覆盖和填充过滤器创建相同的输出要快。 + +### 参数 + +- inputs 设置输入流的数量。预设值为 2。 +- shortest 如果设置为 1,则在最短输入终止时强制输出终止。预设值为 0。 + +### 示例 + +```python +_ = vfilters.hstack(input(media_v0), input(media_v1), inputs=2, shortest=0).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -filter_complex "[0][1]hstack=inputs=2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hstack.mp4 -y -hide_banner +[0.5299s] +``` + +#### 对比 + +[视频对比链接] + +#### 处理速度比较 + +以下三种方法结果相同: + +```python +vtools.hstack_videos(dst, v1, v1) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -filter_complex "[0][1]hstack=inputs=2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\videos_hstack.mp4 -y -hide_banner +[14.1295s] +``` + +```python +vtools.compare_2_videos(v1, v1, dst) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -filter_complex "[0]pad=w=2*iw[tag0];[tag0][1]overlay=x=w[tag1]" -vcodec h264_nvenc -map [tag1] C:\Users\Admin\Videos\transform\videos_hstack.mp4 -y -hide_banner +[26.6111s] +``` + +```python +vtools.side_by_side_2_videos(v1, v1, dst) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -filter_complex "[0][1]framepack=format=sbs[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\videos_hstack.mp4 -y -hide_banner +[15.9902s] +``` + +确实此滤镜更快一些,之后都采用此滤镜进行水平并排比较视频的处理。 + +## hue + +> https://ffmpeg.org/ffmpeg-filters.html#hue + +修改输入的色相和/或饱和度。 + +### 参数 + +- h 将色相角指定为度数。它接受一个表达式,默认为“0”。 +- s 在 [-10,10] 范围内指定饱和度。它接受一个表达式,默认为“1”。 +- H 将色相角指定为弧度数。它接受一个表达式,默认为“0”。 +- b 在 [-10,10] 范围内指定亮度。它接受一个表达式,默认为“0”。 + +h 和 H 是互斥的,不能同时指定。b,h,H 和 s 选项值是包含以下常量的表达式: + +- n 输入帧的帧数从 0 开始 +- pts 输入帧的表示时间戳记,以时基单位表示 +- r 输入视频的帧速率,如果输入帧速率未知,则为 NAN +- t 以秒为单位的时间戳,如果输入的时间戳未知,则为 NAN +- tb 输入视频的时基 + +### 示例 + +#### 设置色相和饱和度 + +```python +_ = input(src).hue(h=90, s=1).output(dst).run() +# 等价 +_ = input(src).hue(H="PI/2", s=1).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hue=h=90:s=1[tag0]" -vcodec h264_nvenc 
-map [tag0] C:\Users\Admin\Videos\transform\v0_hue1.mp4 -y -hide_banner +[0.3754s] +``` + +#### 对比 + +[视频对比链接] + +#### 旋转色调并使饱和度在 1 秒钟内在 0 和 2 之间摆动 + +```python +_ = input(src).hue(H="2*PI*t", s="sin(2*PI*t)+1").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hue=H=2*PI*t:s=sin(2*PI*t)+1[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hue2.mp4 -y -hide_banner +[0.4243s] +``` + +#### 对比 + +[视频对比链接] + +#### 从 0 开始应用 3 秒的饱和淡入效果 + +```python +_ = input(src).hue(s="min(t/3,1)").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hue=s=min(t/3\,1)[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hue3.mp4 -y -hide_banner +[0.4264s] +``` + +一般的淡入表达式可以写成: + +```python +# _ = input(src).hue(s="min(0,max((t-START)/DURATION,1))").output(dst).run() +_ = input(src).hue(s="min(0,max((t-0)/3,1))").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hue=s=min(0\,max((t-0)/3\,1))[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hue3.mp4 -y -hide_banner +[0.3993s] +``` + +注意:这个“通用”淡入表达式照搬自 FFmpeg 官方文档,但它恒等于 0(min(0, …) 不可能大于 0),实际并不能产生淡入;正确写法应与下文的淡出表达式对称,即 s="max(0, min(1, (t-START)/DURATION))"。 + +#### 对比 + +[视频对比链接] + +#### 从 2 秒开始应用 2 秒的饱和淡出效果 + +```python +_ = input(src).hue(s="max(0, min(1, (4-t)/2))").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]hue=s=max(0\, min(1\, (4-t)/2))[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_hue4.mp4 -y -hide_banner +[0.3581s] +``` + +一般的淡出表达式可以写成: + +```python +# _ = input(src).hue(s="max(0, min(1, (START+DURATION-t)/DURATION))").output(dst).run() +``` + +#### 对比 + +[视频对比链接] + +## hysteresis + +> https://ffmpeg.org/ffmpeg-filters.html#hysteresis + +通过连通分量(connected components)将第一路输入流生长到第二路输入流中,这使得构建更稳健的边缘掩模成为可能。 + +### 参数 + +- planes 设置哪些通道将作为位图处理,未处理的通道将从第一个流复制。 默认值为 0xf,将处理所有通道。 +- threshold 设置用于过滤的阈值。 如果像素分量值高于此值,则会激活用于连接分量的过滤器算法。默认值是 0。 + +### 示例 + +不懂,略。 + +## identity + +> https://ffmpeg.org/ffmpeg-filters.html#identity + +计算两个输入视频之间的 identity 得分。 + +该滤镜接收两个输入视频。 + +两个输入视频必须具有相同的分辨率和像素格式,此过滤器才能正常工作。还假定两个输入具有相同数量的帧,将它们一一比较。 + +通过日志系统打印每个分量的以及平均、最小和最大的 identity 得分。 + +过滤器将计算出的每帧 identity 得分存储在帧元数据中。 + +### 参数 + +无。 + +### 示例 + +不懂,略。 + +## idet + +> https://ffmpeg.org/ffmpeg-filters.html#idet + +检测视频隔行扫描类型。 + +该过滤器尝试检测输入帧是隔行还是逐行,以及顶场优先还是底场优先。 它还将尝试检测在相邻帧之间重复的场(电视电影的标志)。 + +单帧检测在对每个帧进行分类时仅考虑紧邻的帧。 多帧检测合并了先前帧的分类历史。 + +过滤器将记录以下元数据值: + +- single.current_frame 使用单帧检测得到的当前帧类型。以下之一:“tff”(顶场优先),“bff”(底场优先),“progressive”(逐行)或“undefined”(不确定) +- single.tff 使用单帧检测判定为顶场优先的累积帧数。 +- multiple.tff 使用多帧检测判定为顶场优先的累积帧数。 +- single.bff 使用单帧检测判定为底场优先的累积帧数。 +- multiple.current_frame 使用多帧检测得到的当前帧类型。以下之一:“tff”(顶场优先),“bff”(底场优先),“progressive”(逐行)或“undefined”(不确定) +- multiple.bff 使用多帧检测判定为底场优先的累积帧数。 +- single.progressive 使用单帧检测判定为逐行的累积帧数。 +- multiple.progressive 使用多帧检测判定为逐行的累积帧数。 +- single.undetermined 使用单帧检测无法分类的累积帧数。 +- multiple.undetermined 使用多帧检测无法分类的累积帧数。 +- repeated.current_frame 当前帧中哪个场是从上一帧重复而来。“neither”,“top” 或 “bottom” 之一。 +- repeated.neither 没有重复场的累积帧数。 +- repeated.top 顶场与上一帧的顶场重复的累积帧数。 +- repeated.bottom 底场与上一帧的底场重复的累积帧数。 + +### 参数 + +- intl_thres 设置隔行扫描阈值。 +- prog_thres 设置逐行阈值。 +- rep_thres 重复场检测的阈值。 +- half_life 给定帧对统计的贡献减半后的帧数(即其对分类的贡献仅为 0.5)。默认值为 0 表示所看到的所有帧将永远获得 1.0 的总权重。 +- analyze_interlaced_flag 如果该值不为 0,则 idet 将使用指定的帧数来确定隔行标志是否正确,它将不计算未确定的帧。如果发现标志是正确的,则将不做任何进一步的计算就使用该标志,如果发现它是不正确的,则将不进行任何进一步的计算就将其清除。这样可以将 idet 滤波器作为一种低计算量的方法来清除隔行标志。 + +### 示例 + +不懂,略。 + +## il + +> 
https://ffmpeg.org/ffmpeg-filters.html#il + +解交织或交织场。 + +该过滤器允许人们处理隔行扫描的图像场而无需对它们进行隔行扫描。 去交织将输入帧分为2个场(所谓的半图片)。 奇数行移动到输出图像的上半部分,偶数行移动到下半部分。 您可以独立处理(过滤)它们,然后重新交织它们。 + +### 参数 + +- luma_mode, l +- chroma_mode, c +- alpha_mode, a + - ‘none’ 默认 + - ‘deinterleave, d’ 解交织场,将一个放置在另一个上方。 + - ‘interleave, i’ 交织字段。 反转去交织的效果。 +- luma_swap, ls +- chroma_swap, cs +- alpha_swap, as 交换亮度/色度/ alpha字段。 交换偶数和奇数行。 预设值为 0。 + +### 示例 + +不懂,略。 + +## inflate + +> https://ffmpeg.org/ffmpeg-filters.html#inflate + +对视频应用膨胀效果。 + +该过滤器通过仅考虑高于像素的值,将像素替换为 local(3x3)平均值。 + +### 参数 + +- threshold0 +- threshold1 +- threshold2 +- threshold3 限制每个通道的最大变化,默认值为 65535。如果为 0,则通道将保持不变。 + +### 示例 + +不懂,略。 + +## interlace + +> https://ffmpeg.org/ffmpeg-filters.html#interlace + +简单的隔行过滤器,可处理逐行内容。 这样可以将奇数帧的上(或下)行与偶数帧的下(或上)行交织,从而将帧速率减半,并保留了图像高度。 + +``` + Original Original New Frame + Frame 'j' Frame 'j+1' (tff) + ========== =========== ================== + Line 0 --------------------> Frame 'j' Line 0 + Line 1 Line 1 ----> Frame 'j+1' Line 1 + Line 2 ---------------------> Frame 'j' Line 2 + Line 3 Line 3 ----> Frame 'j+1' Line 3 + ... ... ... +New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on +``` + +### 参数 + +- scan 这确定是从逐行帧的偶数(tff- 默认值)还是奇数(bff)行中获取隔行帧。 +- lowpass 垂直低通滤波器,可避免 Twitter 隔行扫描并减少莫尔条纹。 + - ‘0, off’ 禁用垂直低通滤波器 + - ‘1, linear’ 启用线性过滤器(默认) + - ‘2, complex’ 启用复杂过滤器。 这将略微减少 Twitter 和波纹,但更好地保留细节和主观清晰度 + +### 示例 + +不懂,略。 + +## kerndeint + +> https://ffmpeg.org/ffmpeg-filters.html#kerndeint + +通过应用自适应内核解交织来对输入视频进行解交织。 对视频的隔行扫描部分进行处理以产生逐行帧。 + +### 参数 + +- thresh 设置阈值,该阈值在确定是否必须处理像素线时会影响滤镜的容限。它必须是 [0,255] 范围内的整数,默认值为 10。值 0 将导致在每个像素上应用该过程。 +- map 如果设置为 1,则将超出阈值的像素绘制为白色。默认值为 0。 +- order 设置字段顺序。如果设置为 1,则交换字段,如果为 0,则保留字段。默认为 0。 +- sharp 如果设置为 1,则启用其他锐化。默认值为 0。 +- twoway 如果设置为 1,则启用双向锐化。默认值为 0。 + +### 示例 + +不懂,略。 + +## kirsch + +> https://ffmpeg.org/ffmpeg-filters.html#kirsch + +对输入视频流应用 kirsch 运算符。 + +### 参数 + +- planes 设置将要处理的通道,将复制未处理的通道。 默认值为 0xf,将处理所有通道。 +- scale 设置值将与过滤结果相乘。 +- delta 设置将被添加到过滤结果中的值。 + +### 示例 + +不懂,略。 + +## lagfun + +> https://ffmpeg.org/ffmpeg-filters.html#lagfun + +慢慢更新较暗的像素。 + +此滤镜使短时间的闪光看起来更长。 + +### 参数 + +- decay 设置衰减因子。默认值为 0.95。允许范围是 0 到 1。 +- planes 设置要过滤的通道。默认为全部。允许范围是 0 到 15。 + +### 示例 + +不懂,略。 + +## lenscorrection + +> https://ffmpeg.org/ffmpeg-filters.html#lenscorrection + +校正径向镜片变形 + +该滤镜可用于校正由于使用广角镜而引起的径向变形,从而重新校正图像。为了找到正确的参数,可以使用一些可用的工具,例如,作为 opencv 的一部分或只是反复试验。要使用 opencv,请使用来自 opencv 源的校准样本(在 samples / cpp 下),并从所得矩阵中提取 k1 和 k2 系数。 + +注意,实际上,KDE 项目的开源工具 Krita 和 Digikam 中提供了相同的过滤器。 + +与也可以用来补偿镜头误差的晕影滤镜相比,此滤镜可以校正图像的失真,而晕影滤镜可以校正亮度分布,因此在某些情况下,您可能希望将两个滤镜一起使用,但必须注意顺序,即在镜片校正之前还是之后都要进行渐晕。 + +### 参数 + +- cx 图像焦点的相对 x 坐标,从而导致畸变的中心。该值的范围为 [0,1],并表示为图像宽度的分数。默认值为 0.5。 +- cy 图像焦点的相对 y 坐标,从而失真的中心。该值的范围为 [0,1],并表示为图像高度的分数。默认值为 0.5。 +- k1 二次校正项的系数。该值的范围为 [-1,1]。0 表示无校正。默认值为 0。 +- k2 双二次校正项的系数。该值的范围为 [-1,1]。0 表示无校正。默认值为 0。 +- i 设置插补类型。可以是最近的或双线性的。默认值是最近的。 +- fc 指定未映射像素的颜色。默认黑色。 + +### 示例 + +不懂,略。 + +## lensfun + +> https://ffmpeg.org/ffmpeg-filters.html#lensfun + +通过 lensfun 库应用镜头校正。 + +lensfun 滤镜需要相机品牌,相机型号和镜头型号才能应用镜头校正。筛选器将加载 lensfun 数据库并对其进行查询,以在数据库中找到相应的相机和镜头条目。只要可以使用给定的选项找到这些条目,过滤器就可以对帧进行校正。请注意,不完整的字符串将导致滤镜选择与给定选项最匹配的滤镜,并且滤镜将输出所选的相机和镜头型号(记录为“信息”级)。您必须提供所需的品牌,相机型号和镜头型号。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## libvmaf + +> https://ffmpeg.org/ffmpeg-filters.html#libvmaf + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## limiter + +> https://ffmpeg.org/ffmpeg-filters.html#limiter + +将像素分量值限制在指定范围 [min, max]。 + 
+### 参数 + +- min 默认为输入的最低允许值。 +- max 默认为输入的最大允许值。 +- planes 设置处理通道,默认全部。 + +### 示例 + +略。 + +## loop + +> https://ffmpeg.org/ffmpeg-filters.html#loop + +循环播放视频帧。 + +### 参数 + +- loop 设置循环数。将此值设置为 -1 将导致无限循环。默认值为 0。 +- size 以帧数设置最大尺寸。默认值为 0。 +- start 设置循环的第一帧。默认值为 0。 + +### 示例 + +```python +_ = input(src).loop(loop=-1, size=1, start=1).output(dst, duration=5).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]loop=loop=-1:size=1:start=1[tag0]" -t 5 -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_loop.mp4 -y -hide_banner +[0.4211s] +``` + +#### 对比 + +[视频对比链接] + +## lut1d + +> https://ffmpeg.org/ffmpeg-filters.html#lut1d + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## lut3d + +> https://ffmpeg.org/ffmpeg-filters.html#lut3d + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## lumakey + +> https://ffmpeg.org/ffmpeg-filters.html#lumakey + +将某些亮度值转换为透明度。 + +### 参数 + +- threshold 设置将用作透明度基础的亮度。预设值为 0。 +- tolerance 设置要键入的亮度值范围。默认值为 0.01。 +- softness 设置柔软度范围。默认值为 0。使用此值可控制从零到完全透明的渐变。 + +### 示例 + +```python +_ = input(src).lumakey(threshold=0.01, tolerance=0.01).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]lumakey=threshold=0.01:tolerance=0.01[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_lumakey.mp4 -y -hide_banner +[0.3720s] +``` + +#### 对比 + +[视频对比链接] + +## lut, lutrgb, lutyuv + +> https://ffmpeg.org/ffmpeg-filters.html#lutyuv + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## lut2, tlut2 + +> https://ffmpeg.org/ffmpeg-filters.html#tlut2 + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskedclamp + +> https://ffmpeg.org/ffmpeg-filters.html#maskedclamp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskedmax + +> https://ffmpeg.org/ffmpeg-filters.html#maskedmax + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskedmerge + +> https://ffmpeg.org/ffmpeg-filters.html#maskedmerge + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskedmin + +> https://ffmpeg.org/ffmpeg-filters.html#maskedmin + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskedthreshold + +> https://ffmpeg.org/ffmpeg-filters.html#maskedthreshold + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## maskfun + +> https://ffmpeg.org/ffmpeg-filters.html#maskfun + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## mcdeint + +> https://ffmpeg.org/ffmpeg-filters.html#mcdeint + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## median + +> https://ffmpeg.org/ffmpeg-filters.html#median + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## mergeplanes + +> https://ffmpeg.org/ffmpeg-filters.html#mergeplanes + +合并来自多个视频流的颜色通道分量。 + +滤镜最多可接收 4 个输入流,并将选定的输入通道合并到输出视频。 + +### 参数 + +- mapping 将输入设置为输出通道映射。默认值为 0。映射被指定为位图。应将其指定为十六进制数,格式为 0xAa [Bb [Cc [Dd]]]。“Aa”描述了输出流第一个通道的映射。“A”设置要使用的输入流的编号(从 0 到 3),“a”设置要使用的相应输入的通道编号(从 0 到 3)。其余映射相似,“Bb”描述输出流第二通道的映射,“Cc”描述输出流第三通道的映射,“Dd”描述输出流第四通道的映射。 +- format 设置输出像素格式。默认值为 yuva444p。 + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## mestimate + +> https://ffmpeg.org/ffmpeg-filters.html#mestimate + 
+使用块匹配算法估计和导出运动矢量。 运动矢量存储在帧侧数据中,以供其他过滤器使用。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## midequalizer + +> https://ffmpeg.org/ffmpeg-filters.html#midequalizer + +使用两个视频流应用中途图像均衡效果。 + +中途图像均衡可将一对图像调整为具有相同的直方图,同时尽可能保持其动态。 这对于例如 匹配一对立体声相机的曝光。 + +该滤波器具有两个输入和一个输出,它们必须具有相同的像素格式,但大小可能不同。 滤波器的输出首先通过两个输入的中间直方图进行调整。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## minterpolate + +> https://ffmpeg.org/ffmpeg-filters.html#minterpolate + +使用运动插值将视频转换为指定的帧速率。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## mix + +> https://ffmpeg.org/ffmpeg-filters.html#mix + +将几个视频输入流混合到一个视频流中。 + +### 参数 + +- inputs 输入数量。如果未指定,则默认为 2。 +- weights 指定每个输入视频流的权重作为顺序。每个砝码之间都用空格隔开。如果权重数量小于帧数,则最后指定的权重将用于所有剩余的未设置权重。 +- scale 指定比例,如果设置,它将乘以每个权重的总和再乘以像素值,以得到最终的目标像素值。默认情况下,比例会自动缩放为权重之和。 +- duration 指定如何确定流的结尾。 + - ‘ longest ’ 最长输入的持续时间。(默认) + - ‘ shortest ’ 最短输入的持续时间。 + - ‘ first ’ 第一次输入的持续时间。 + +### 示例 + +```python +_ = vfilters.mix(input(media_v0), input(media_v1), input(media_v2),inputs=3).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -filter_complex "[0][1][2]mix=inputs=3[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_mix.mp4 -y -hide_banner +[0.9549s] +``` + +#### 对比 + +[视频对比链接] + +## monochrome + +> https://ffmpeg.org/ffmpeg-filters.html#monochrome + +使用自定义滤色器将视频转换为灰色。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## mpdecimate + +> https://ffmpeg.org/ffmpeg-filters.html#mpdecimate + +丢弃与前一帧相差无几的帧,以降低帧速率。 + +此过滤器的主要用途是用于非常低比特率的编码(例如,通过拨号调制解调器进行流式传输),但从理论上讲,它可以用于修复反向电视连接不正确的电影。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## msad + +> https://ffmpeg.org/ffmpeg-filters.html#msad + +获取两个输入视频之间的 MSAD(绝对差的平均值)。 + +该过滤器可拍摄两个输入视频。 + +两个输入视频必须具有相同的分辨率和像素格式,此过滤器才能正常工作。还假定两个输入具有相同数量的帧,将它们一一比较。 + +通过测井系统打印获得的每组分,平均,最小和最大 MSAD。 + +过滤器将计算出的每个帧的 MSAD 存储在帧元数据中。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## negate + +> https://ffmpeg.org/ffmpeg-filters.html#negate + +对输入视频取反色。 + +### 参数 + +- negate_alpha 值为 1 时,它会否定 alpha 分量(如果存在)。预设值为 0。 + +### 示例 + +```python +_ = input(src).negate(negate_alpha=False).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]negate=negate_alpha=False[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_negate.mp4 -y -hide_banner +[0.4209s] +``` + +#### 对比 + +[视频对比链接] + +## nlmeans + +> https://ffmpeg.org/ffmpeg-filters.html#nlmeans + +使用非局部均值算法对帧进行消噪。 + +通过寻找具有相似上下文的其他像素来调整每个像素。 通过比较它们周围大小为pxp的补丁来定义此上下文相似性。 在像素周围的rxr区域中搜索补丁。 + +请注意,研究区域定义了色块的中心,这意味着某些色块将由该研究区域之外的像素组成。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## nnedi + +> https://ffmpeg.org/ffmpeg-filters.html#nnedi + +使用神经网络边沿定向插值对视频进行去交织。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## noformat + +> https://ffmpeg.org/ffmpeg-filters.html#noformat + +强制 libavfilter 不要将任何指定的像素格式用于下一个过滤器的输入。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## noise + +> https://ffmpeg.org/ffmpeg-filters.html#noise + +在视频输入帧上添加噪点。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## normalize + +> https://ffmpeg.org/ffmpeg-filters.html#normalize + +标准化 
RGB 视频(又名直方图拉伸,对比度拉伸)。 + +对于每帧的每个通道,滤波器都会计算输入范围并将其线性映射到用户指定的输出范围。输出范围默认为从纯黑色到纯白色的整个动态范围。 + +可以在输入范围上使用时间平滑,以减少当小的深色或明亮的物体进入或离开场景时引起的闪烁(亮度的快速变化)。这类似于摄像机上的自动曝光(自动增益控制),并且像摄像机一样,它可能会导致一段时间的视频过度曝光或曝光不足。 + +RGB 通道可以独立进行归一化,这可能会导致某些颜色偏移,或者可以将它们链接为单个通道,从而防止出现颜色偏移。链接归一化保留色调。独立归一化没有,因此可以用来消除某些偏色。独立和链接的归一化可以任意比例组合。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## null + +> https://ffmpeg.org/ffmpeg-filters.html#null + +不变地将视频源传递到输出。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## ocr + +> https://ffmpeg.org/ffmpeg-filters.html#ocr + +光学字符识别。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## ocv + +> https://ffmpeg.org/ffmpeg-filters.html#ocv + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## oscilloscope + +> https://ffmpeg.org/ffmpeg-filters.html#oscilloscope + +2D视频示波器。 + +对测量空间冲动,阶跃响应,色度延迟等有用。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## overlay + +> https://ffmpeg.org/ffmpeg-filters.html#overlay + +将一个视频叠加在另一个视频上。它有两个输入,只有一个输出。 **第一个输入**是第二个输入叠加在其上的“主”视频。 + +### 参数 + +- x +- y 设置主视频上叠加视频的 x 和 y 坐标表达式。两个表达式的默认值为“0”。如果表达式无效,则将其设置为一个很大的值(这意味着该叠加层将不会显示在输出可见区域内)。 +- eof_action +- eval 在计算 x 和 y 的表达式时设置。 + - ‘init’ 在过滤器初始化期间或处理命令时仅对表达式求值一次 + - ‘frame’ 计算每个传入帧的表达式 +- shortest +- format 设置输出视频的格式。 + - ‘yuv420’ + - ‘yuv420p10’ + - ‘yuv422’ + - ‘yuv422p10’ + - ‘yuv444’ + - ‘rgb’ + - ‘gbrp’ + - ‘auto’ +- repeatlast +- alpha 设置叠加视频的 alpha 格式,它可以是 straight 的或 premultiplied 的。 默认为 straight。 + +x 和 y 表达式可以包含以下参数: + +- main_w, W +- main_h, H 主要输入宽度和高度。 +- overlay_w, w +- overlay_h, h 叠加层输入的宽度和高度。 +- x +- y x 和 y 的计算值。为每个新帧评估它们。 +- hsub +- vsub 输出格式的水平和垂直色度子样本值。例如,对于像素格式“yuv422p”,hsub 为 2,vsub 为 1。 +- n 输入帧的数量,从 0 开始 +- pos 输入框在文件中的位置,如果未知,则为 NAN +- t 时间戳,以秒为单位。如果输入的时间戳未知,则为 NAN。 + +### 示例 + +#### 顶部与右边 + +```python +_ = vfilters.overlay(mv1, mv2, "main_w-overlay_w-50", "main_h-overlay_h-50").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -filter_complex "[0][1]overlay=x=main_w-overlay_w-50:y=main_h-overlay_h-50[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_overlay1.mp4 -y -hide_banner +[0.6054s] +``` + +#### 对比 + +[视频对比链接] + +#### 添加图片水印 + +```python +_ = vfilters.overlay(mv1, logo, 10, "main_h-overlay_h-10").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -i testdata\i3.png -filter_complex "[1]scale=h=100:w=100[tag0];[0][tag0]overlay=x=10:y=main_h-overlay_h-10[tag1]" -vcodec h264_nvenc -map [tag1] C:\Users\Admin\Videos\transform\v0_overlay2.mp4 -y -hide_banner +[0.3607s] +``` + +#### 对比 + +[视频对比链接] + +#### 添加多个图片水印 + +```python +_ = mv1.overlay(logos[0], 10, "H-h-10").overlay(logos[1], "W-w-10", "H-h-10").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -i testdata\i3.png -filter_complex "[1]scale=h=100:w=100[tag0];[tag0]split=2[tag1][tag2];[0][tag1]overlay=x=10:y=H-h-10[tag3];[tag3][tag2]overlay=x=W-w-10:y=H-h-10[tag4]" -vcodec h264_nvenc -map [tag4] C:\Users\Admin\Videos\transform\v0_overlay3.mp4 -y -hide_banner +[0.3933s] +``` + +#### 对比 + +[视频对比链接] + +## overlay_cuda + +> https://ffmpeg.org/ffmpeg-filters.html#overlay_cuda + +同上,但只支持 CUDA 帧。 + +## owdenoise + +> https://ffmpeg.org/ffmpeg-filters.html#owdenoise + +Overcomplete Wavelet 降噪器。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 
+ +[视频对比链接] + +## pad + +> https://ffmpeg.org/ffmpeg-filters.html#pad + +将填充物添加到输入图像,并将原始输入放置在提供的 x,y 坐标处。 + +### 参数 + +- width, w +- height, h 为输出图像的大小指定一个表达式,其中添加了填充。 如果 width 或 height 的值为 0,则将相应的输入大小用于输出。宽度表达式可以引用由高度表达式设置的值,反之亦然。宽度和高度的默认值为 0。 +- x +- y 指定偏移量,以相对于输出图像的顶部/左侧边界将输入图像放置在填充区域内。x 表达式可以引用 y 表达式设置的值,反之亦然。x 和 y 的默认值为 0。如果 x 或 y 的值为负数,则会对其进行更改,以使输入图像位于填充区域的中心。 +- color 指定填充区域的颜色。默认黑色。 +- eval 指定何时计算宽度,高度,x 和 y 表达式。 + - ‘init’ 在过滤器初始化期间或处理命令时,仅对表达式求值一次。默认。 + - ‘frame’ 计算每个传入帧的表达式。 +- aspect 填充至指定分辨率。 + +width,height,x 和 y 选项的值是包含以下常量的表达式: +- in_w, iw +- in_h, ih 输入视频的宽度和高度。 +- out_w, ow +- out_h, oh 输出的宽度和高度(填充区域的大小),由 width 和 height 表达式指定。 +- x +- y 由 x 和 y 表达式指定的 x 和 y 偏移量;如果尚未指定,则为 NAN。 +- a iw / ih +- sar 输入样本宽高比 +- dar 输入显示宽高比 (iw / ih) * sar +- hsub +- vsub 水平和垂直色度子样本值。例如,对于像素格式“yuv422p”,hsub 为 2,vsub 为 1。 + +### 示例 + +```python +_ = input(src).pad("3/2*iw", "3/2*ih", "(ow-iw)/2", "(oh-ih)/2").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]pad=3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_pad.mp4 -y -hide_banner +[0.4306s] +``` + +#### 对比 + +[视频对比链接] + +## palettegen + +> https://ffmpeg.org/ffmpeg-filters.html#palettegen + +为整个视频流生成一个调色板。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## paletteuse + +> https://ffmpeg.org/ffmpeg-filters.html#paletteuse + +使用调色板对输入视频流进行下采样。 + +该过滤器有两个输入:一个视频流和一个调色板。 调色板必须是 256 像素的图像。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## perspective + +> https://ffmpeg.org/ffmpeg-filters.html#perspective + +纠正未垂直于屏幕录制的视频的透视图。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## phase + +> https://ffmpeg.org/ffmpeg-filters.html#phase + +将隔行视频延迟一个场时间,以便改变场序。 + +预期用途是将以相反场序拍摄的 PAL 电影固定为电影到视频的传输。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## photosensitivity + +> https://ffmpeg.org/ffmpeg-filters.html#photosensitivity + +减少视频中的各种闪烁,从而帮助癫痫病患者。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pixdesctest + +> https://ffmpeg.org/ffmpeg-filters.html#pixdesctest + +像素格式描述符测试过滤器,主要用于内部测试。 输出视频应等于输入视频。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pixscope + +> https://ffmpeg.org/ffmpeg-filters.html#pixscope + +显示颜色通道的样本值。 主要用于检查颜色和级别。 支持的最低分辨率为640x480。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pp + +> https://ffmpeg.org/ffmpeg-filters.html#pp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pp7 + +> https://ffmpeg.org/ffmpeg-filters.html#pp7 + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## premultiply + +> https://ffmpeg.org/ffmpeg-filters.html#premultiply + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## prewitt + +> https://ffmpeg.org/ffmpeg-filters.html#prewitt + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pseudocolor + +> https://ffmpeg.org/ffmpeg-filters.html#pseudocolor + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## psnr + +> https://ffmpeg.org/ffmpeg-filters.html#psnr + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## pullup + +> https://ffmpeg.org/ffmpeg-filters.html#pullup + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + 
+[视频对比链接] + +## qp + +> https://ffmpeg.org/ffmpeg-filters.html#qp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## random + +> https://ffmpeg.org/ffmpeg-filters.html#random + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## readeia608 + +> https://ffmpeg.org/ffmpeg-filters.html#readeia608 + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## readvitc + +> https://ffmpeg.org/ffmpeg-filters.html#readvitc + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## remap + +> https://ffmpeg.org/ffmpeg-filters.html#remap + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## removegrain + +> https://ffmpeg.org/ffmpeg-filters.html#removegrain + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## removelogo + +> https://ffmpeg.org/ffmpeg-filters.html#removelogo + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## repeatfields + +> https://ffmpeg.org/ffmpeg-filters.html#repeatfields + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## reverse + +> https://ffmpeg.org/ffmpeg-filters.html#reverse + +反转视频片段。即倒放。 + +警告:此过滤器需要内存来缓冲整个片段,因此建议进行修剪。 + +### 参数 + +无。 + +### 示例 + +```python +_ = input(src).reverse().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i C:\Users\Admin\Videos\transform\v0_reverse.mp4 -filter_complex "[0][1]hstack=inputs=2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\contrast\v0_reverse_compare.mp4 -y -hide_banner +[0.4597s] +``` + +#### 对比 + +[视频对比链接] + +## rgbashift + +> https://ffmpeg.org/ffmpeg-filters.html#rgbashift + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## roberts + +> https://ffmpeg.org/ffmpeg-filters.html#roberts + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## rotate + +> https://ffmpeg.org/ffmpeg-filters.html#rotate + +以弧度表示的任意角度旋转视频。 + +### 参数 + +- angle, a 设置用于顺时针旋转输入视频的角度的表达式,以弧度表示。负值将导致逆时针旋转。默认情况下,它设置为“0”。对每个帧评估该表达式。 +- out_w, ow 设置输出宽度表达式,默认值为“iw”。在配置期间,该表达式仅被评估一次。 +- out_h, oh 设置输出高度表达式,默认值为“ih”。在配置期间,该表达式仅被评估一次。 +- bilinear 如果设置为 1,则启用双线性插值,值为 0 则将其禁用。预设值为 1。 +- fillcolor, c 设置用于填充旋转图像未覆盖的输出区域的颜色。如果选择特殊值“none”,则不打印背景(例如,如果从未显示背景,则很有用)。默认值为“黑色”。 + +- 角度和输出大小的表达式可以包含以下常量和函数: +- n 输入帧的序号,从 0 开始。在过滤第一帧之前,始终为 NAN。 +- t 输入帧的时间(以秒为单位),在配置过滤器时将其设置为 0。在过滤第一帧之前,始终为 NAN。 +- hsub +- vsub 水平和垂直色度子样本值。例如,对于像素格式“yuv422p”,hsub 为 2,vsub 为 1。 +- in_w, iw +- in_h, ih 输入视频的宽度和高度 +- out_w, ow +- out_h, oh 输出的宽度和高度,即宽度和高度表达式指定的填充区域的大小 +- rotw(a) +- roth(a) 完全包含以弧度旋转的输入视频所需的最小宽度 / 高度。 + +### 示例 + +```python +_ = input(src).rotate("PI/6").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]rotate=PI/6[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_rotate.mp4 -y -hide_banner +[0.4854s] +``` + +#### 对比 + +[视频对比链接] + +## sab + +> https://ffmpeg.org/ffmpeg-filters.html#sab + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## scale + +> https://ffmpeg.org/ffmpeg-filters.html#scale + +缩放输入视频的尺寸。 + +缩放过滤器通过更改输出样本的宽高比,强制输出显示宽高比与输入相同。 + +如果输入图像格式与下一个过滤器请求的格式不同,则比例过滤器会将输入转换为请求的格式。 + +### 参数 + +- width, w +- height, h 设置输出视频尺寸表达式。默认值为输入尺寸。如果 width 或 w 值为 0,则将输入宽度用于输出。如果 height 或 h 值为 0,则将输入高度用于输出。如果其中一个值中只有一个是 -n 且 n> = 
1,则比例滤镜将使用一个值来保持输入图像的宽高比,该值是根据其他指定尺寸计算得出的。但是,此后,请确保计算出的尺寸可以被 n 整除,并在必要时调整该值。如果两个值均为 -n 且 n> = 1,则行为将与先前设置为 0 的两个值相同。 +- eval 指定何时计算宽度,高度,x 和 y 表达式。 + - ‘init’ 在过滤器初始化期间或处理命令时,仅对表达式求值一次。默认。 + - ‘frame’ 计算每个传入帧的表达式。 +- interl 设置隔行模式。 + - ‘1’ 强制隔行感知缩放。 + - ‘0’ 不要应用隔行缩放。默认。 + - ‘-1’ 根据源帧是否标记为隔行,选择隔行感知缩放。 +- flags 设置缩放标志。 +- param0, param1 设置缩放算法的输入参数。 +- size, s 设置视频大小。 +- in_color_matrix +- out_color_matrix 设置输入 / 输出 YCbCr 颜色空间类型。这允许自动检测的值被覆盖,并允许强制使用用于输出和编码器的特定值。如果未指定,则色彩空间类型取决于像素格式。 + - ‘auto’ 自动选择。 + - ‘bt709’ + - ‘fcc’ + - ‘bt601’ + - ‘bt470’ + - ‘smpte170m’ + - ‘bt2020’ +- in_range +- out_range 设置输入/输出 YCbCr 采样范围。这允许自动检测的值被覆盖,并允许强制使用用于输出和编码器的特定值。如果未指定,则范围取决于像素格式。可能的值: + - ‘auto/unknown’ + - ‘jpeg/full/pc’ + - ‘mpeg/limited/tv’ +- force_original_aspect_ratio 如有必要,请启用或减少输出视频的宽度或高度,以保持原始的宽高比。可能的值: + - ‘disable’ 按指定比例缩放视频并禁用此功能。 + - ‘decrease’ 如果需要,输出视频的尺寸将自动减小。 + - ‘increase’ 如果需要,输出视频的尺寸将自动增加。此选项的一个有用实例是,当您知道特定设备的最大允许分辨率时,可以使用该分辨率将输出视频限制为该分辨率,同时保持宽高比。例如,设备 A 允许 1280x720 的播放,而您的视频是 1920x800。使用此选项(将其设置为减少)并在命令行中指定 1280x720,则输出为 1280x533。请注意,这与为 w 或 h 指定 -1 有所不同,您仍然需要指定输出分辨率才能使此选项起作用。 +- force_divisible_by 与 force_original_aspect_ratio 一起使用时,请确保输出尺寸(宽度和高度)都可被给定的整数整除。这与在 w 和 h 选项中使用 -n 相似。此选项遵循为 force_original_aspect_ratio 设置的值,相应地增加或减少分辨率。视频的宽高比可能会稍作修改。如果您需要使用 force_original_aspect_ratio 使视频适合或超过定义的分辨率,并且在宽度或高度可分割性方面有编码器限制,则此选项非常方便。 + +w 和 h 选项的值是包含以下常量的表达式: +- in_w, iw +- in_h, ih 输入视频的宽度和高度 +- out_w, ow +- out_h, oh 输出的宽度和高度,即宽度和高度表达式指定的填充区域的大小 +- a iw / ih +- sar 输入样本宽高比 +- dar 输入显示宽高比 (iw / ih) * sar +- hsub +- vsub 水平和垂直色度子样本值。例如,对于像素格式“yuv422p”,hsub 为 2,vsub 为 1。 +- ohsub +- ovsub 水平和垂直输出色度子样本值。例如,对于像素格式“yuv422p”,hsub 为 2,vsub 为 1。 +- n 输入帧的(顺序)编号,从 0 开始。仅适用于 eval = frame。 +- t 输入帧的显示时间戳记,以秒为单位。仅适用于 eval = frame。 +- pos 帧在输入流中的位置(字节偏移),如果此信息不可用和 / 或无意义(例如,在合成视频的情况下),则为 NaN。仅适用于 eval = frame。 + +### 示例 + +```python +_ = input(src).scale("2*iw", "2*ih").output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]scale=h=2*ih:w=2*iw[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_scale.mp4 -y -hide_banner +[0.5439s] +``` + +#### 对比 + +[视频对比链接] + +## scale_npp + +> https://ffmpeg.org/ffmpeg-filters.html#scale_npp + + +### 参数 + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## scale2ref + +> https://ffmpeg.org/ffmpeg-filters.html#scale2ref + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## scroll + +> https://ffmpeg.org/ffmpeg-filters.html#scroll + +以恒定速度水平和/或垂直滚动输入视频。 + +### 参数 + +- horizontal, h 设置水平滚动速度。默认值为 0。允许的范围是 -1 至 1。负值会更改滚动方向。 +- vertical, v 设置垂直滚动速度。默认值为 0。允许的范围是 -1 至 1。负值会更改滚动方向。 +- hpos 设置初始水平滚动位置。默认值为 0。允许的范围是 0 到 1。 +- vpos 设置初始垂直滚动位置。默认值为 0。允许的范围是 0 到 1。 + +### 示例 + +```python +_ = input(src).scroll(h=0.01).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]scroll=h=0.01[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_scroll.mp4 -y -hide_banner +[0.5386s] +``` + +#### 对比 + +[视频对比链接] + +## scdet + +> https://ffmpeg.org/ffmpeg-filters.html#scdet + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## selectivecolor + +> https://ffmpeg.org/ffmpeg-filters.html#selectivecolor + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## separatefields + +> https://ffmpeg.org/ffmpeg-filters.html#separatefields + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## setdar, 
setsar + +> https://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## setfield + +> https://ffmpeg.org/ffmpeg-filters.html#setfield + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## setparams + +> https://ffmpeg.org/ffmpeg-filters.html#setparams + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## shear + +> https://ffmpeg.org/ffmpeg-filters.html#shear + +将剪切变换应用于输入视频。 + +### 参数 + +- shx X 方向上的剪切因子。默认值为 0。允许的范围是 -2 到 2。 +- shy Y 方向上的剪切因子。默认值为 0。允许的范围是 -2 到 2。 +- fillcolor, c 设置用于填充转换后视频未覆盖的输出区域的颜色。默认值为“黑色”。 +- interp 设置插补类型。可以是 bilinear 或 nearest。默认为双线性。 + +### 示例 + +No such filter: 'shear' + +## showinfo + +> https://ffmpeg.org/ffmpeg-filters.html#showinfo + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## showpalette + +> https://ffmpeg.org/ffmpeg-filters.html#showpalette + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## shuffleframes + +> https://ffmpeg.org/ffmpeg-filters.html#shuffleframes + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## shufflepixels + +> https://ffmpeg.org/ffmpeg-filters.html#shufflepixels + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## shuffleplanes + +> https://ffmpeg.org/ffmpeg-filters.html#shuffleplanes + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## signalstats + +> https://ffmpeg.org/ffmpeg-filters.html#signalstats + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## signature + +> https://ffmpeg.org/ffmpeg-filters.html#signature + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## smartblur + +> https://ffmpeg.org/ffmpeg-filters.html#smartblur + +使输入视频模糊而不影响轮廓。 + +### 参数 + +- luma_radius, lr 设置亮度半径。选项值必须是 [0.1,5.0] 范围内的浮点数,该浮点数指定用于模糊图像的高斯滤波器的方差(如果较大,则较慢)。默认值为 1.0。 +- luma_strength, ls 设置亮度强度。选项值必须是在 [-1.0,1.0] 范围内配置浮点数的浮点数。[0.0,1.0] 中包含的值将使图像模糊,而 [-1.0,0.0] 中包含的值将使图像锐化。默认值为 1.0。 +- luma_threshold, lt 设置亮度阈值作为系数,以确定像素是否应该模糊。选项值必须是 [-30,30] 范围内的整数。值为 0 将过滤所有图像,[0,30] 中包含的值将过滤平坦区域,[-30,0] 中包含的值将过滤边缘。预设值为 0。 +- chroma_radius, cr 设置色度半径。选项值必须是 [0.1,5.0] 范围内的浮点数,该浮点数指定用于模糊图像的高斯滤波器的方差(如果较大,则较慢)。默认值为 luma_radius。 +- chroma_strength, cs 设置色度强度。选项值必须是在 [-1.0,1.0] 范围内配置浮点数的浮点数。[0.0,1.0] 中包含的值将使图像模糊,而 [-1.0,0.0] 中包含的值将使图像锐化。默认值为 luma_strength。 +- chroma_threshold, ct 设置用作系数的色度阈值,以确定是否应模糊像素。选项值必须是 [-30,30] 范围内的整数。值为 0 将过滤所有图像,[0,30] 中包含的值将过滤平坦区域,[-30,0] 中包含的值将过滤边缘。默认值为 luma_threshold。 + +如果未显式设置色度选项,则会设置相应的亮度值。 + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## sobel + +> https://ffmpeg.org/ffmpeg-filters.html#sobel + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## spp + +> https://ffmpeg.org/ffmpeg-filters.html#spp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## sr + +> https://ffmpeg.org/ffmpeg-filters.html#sr + +通过应用基于卷积神经网络的超分辨率方法之一来缩放输入。支持以下模型: + +- Super-Resolution Convolutional Neural Network model (SRCNN). +- Efficient Sub-Pixel Convolutional Neural Network model (ESPCN). 
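+
+As a quick sketch of how this could be driven from the library's fluent API (ahead of
+the parameter list below), a call along the following lines should produce the
+corresponding `-filter_complex` invocation; the TensorFlow backend and the model file
+name `espcn.pb` are illustrative assumptions, not files shipped with this repository:
+
+```python
+# Hypothetical sr invocation: upscale with a pre-trained ESPCN model file.
+_ = input(src).sr(dnn_backend="tensorflow", model="espcn.pb").output(dst).run()
+```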
+ +### 参数 + +- dnn_backend native/tensorflow +- model 设置模型文件的路径,以指定网络体系结构及其参数。请注意,不同的后端使用不同的文件格式。TensorFlow 和 native 后端只能按其格式加载文件。 +- scale_factor 设置 SRCNN 模型的比例因子。允许值为 2、3 和 4。默认值为 2。SRCNN 模型必须使用比例因子,因为它接受使用具有适当比例因子的双三次放大来放大输入。 + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## ssim + +> https://ffmpeg.org/ffmpeg-filters.html#ssim + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## stereo3d + +> https://ffmpeg.org/ffmpeg-filters.html#stereo3d + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## streamselect, astreamselect + +> https://ffmpeg.org/ffmpeg-filters.html#astreamselect + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## subtitles + +> https://ffmpeg.org/ffmpeg-filters.html#subtitles + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## super2xsai + +> https://ffmpeg.org/ffmpeg-filters.html#super2xsai + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## swaprect + +> https://ffmpeg.org/ffmpeg-filters.html#swaprect + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## swapuv + +> https://ffmpeg.org/ffmpeg-filters.html#swapuv + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tblend + +> https://ffmpeg.org/ffmpeg-filters.html#tblend + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## telecine + +> https://ffmpeg.org/ffmpeg-filters.html#telecine + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## thistogram + +> https://ffmpeg.org/ffmpeg-filters.html#thistogram + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## threshold + +> https://ffmpeg.org/ffmpeg-filters.html#threshold + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## thumbnail + +> https://ffmpeg.org/ffmpeg-filters.html#thumbnail + +在给定的连续帧序列中选择最具代表性的帧。 + +### 参数 + +- n 设置帧批量大小进行分析; 在一组 n 帧中,过滤器将选择其中一个,然后处理下一批 n 帧直到结束。默认值为 100。 + +由于过滤器会跟踪整个帧序列,因此较大的n值将导致较高的内存使用率,因此不建议使用较高的值。 + +### 示例 + +```python +_ = input(src).thumbnail(50).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]thumbnail=100[tag0]" -map [tag0] C:\Users\Admin\Videos\transform\v0_thumbnail_%d.png -y -hide_banner +[0.4287s] +``` + +#### 对比 + +[视频对比链接] + +## tile + +> https://ffmpeg.org/ffmpeg-filters.html#tile + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tinterlace + +> https://ffmpeg.org/ffmpeg-filters.html#tinterlace + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tmedian + +> https://ffmpeg.org/ffmpeg-filters.html#tmedian + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tmidequalizer + +> https://ffmpeg.org/ffmpeg-filters.html#tmidequalizer + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tmix + +> https://ffmpeg.org/ffmpeg-filters.html#tmix + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tonemap + +> https://ffmpeg.org/ffmpeg-filters.html#tonemap + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## tpad + +> https://ffmpeg.org/ffmpeg-filters.html#tpad + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## transpose + +> 
https://ffmpeg.org/ffmpeg-filters.html#transpose + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## transpose_npp + +> https://ffmpeg.org/ffmpeg-filters.html#transpose_npp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## trim + +> https://ffmpeg.org/ffmpeg-filters.html#trim + +修剪输入,以便输出包含输入的一个连续子部分。只作用于视频流。 + +### 参数 + +- start 指定保留部分的开始时间,即带有时间戳开始的帧将是输出中的第一帧。 +- end 指定要删除的第一帧的时间,即紧接时间戳结束的那一帧之前的帧将是输出中的最后一帧。 +- start_pts 这与开始相同,除了此选项以时基单位而不是秒设置开始时间戳。 +- end_pts 这与结束相同,除了此选项以时基单位而不是秒设置结束时间戳记。 +- duration 输出的最大持续时间(以秒为单位)。 +- start_frame 应该传递到输出的第一帧的编号。 +- end_frame 应该删除的第一帧的编号。 + +开始,结束和持续时间表示为持续时间规范。 + +> https://ffmpeg.org/ffmpeg-utils.html#time-duration-syntax + +请注意,开始 / 结束选项和持续时间选项的前两组看帧时间戳,而 _frame 变体仅对通过过滤器的帧进行计数。另请注意,此过滤器不会修改时间戳。如果希望输出时间戳从零开始,请在调整过滤器之后插入 setpts 过滤器。 + +如果设置了多个开始或结束选项,则此过滤器将尽量保持贪婪并保留所有与至少一个指定约束匹配的帧。要仅保留一次与所有约束匹配的零件,请链接多个 trim 过滤器。 + +默认设置为保留所有输入。因此可以设置,例如只是将所有内容保留在指定时间之前的最终值。 + +### 示例 + +```python +_ = input(src).trim(1, 3).output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]trim=1:3[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_trim.mp4 -y -hide_banner +[0.4053s] +``` + +#### 对比 + +[视频对比链接] + +## unpremultiply + +> https://ffmpeg.org/ffmpeg-filters.html#unpremultiply + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## unsharp + +> https://ffmpeg.org/ffmpeg-filters.html#unsharp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## untile + +> https://ffmpeg.org/ffmpeg-filters.html#untile + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## uspp + +> https://ffmpeg.org/ffmpeg-filters.html#uspp + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## v360 + +> https://ffmpeg.org/ffmpeg-filters.html#v360 + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vaguedenoiser + +> https://ffmpeg.org/ffmpeg-filters.html#vaguedenoiser + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vectorscope + +> https://ffmpeg.org/ffmpeg-filters.html#vectorscope + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vidstabdetect + +> https://ffmpeg.org/ffmpeg-filters.html#vidstabdetect + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vidstabtransform + +> https://ffmpeg.org/ffmpeg-filters.html#vidstabtransform + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vflip + +> https://ffmpeg.org/ffmpeg-filters.html#vflip + +垂直翻转输入视频。 + +### 参数 + +无。 + +### 示例 + +```python +_ = input(src).vflip().output(dst).run() +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0]vflip[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_vflip.mp4 -y -hide_banner +[0.3931s] +``` + +#### 对比 + +[视频对比链接] + +## vfrdet + +> https://ffmpeg.org/ffmpeg-filters.html#vfrdet + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vibrance + +> https://ffmpeg.org/ffmpeg-filters.html#vibrance + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vif + +> https://ffmpeg.org/ffmpeg-filters.html#vif + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vignette + +> 
https://ffmpeg.org/ffmpeg-filters.html#vignette + +制作或反转自然渐晕效果。 + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vmafmotion + +> https://ffmpeg.org/ffmpeg-filters.html#vmafmotion + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## vstack + +> https://ffmpeg.org/ffmpeg-filters.html#vstack + +垂直堆叠输入视频。 + +所有流必须具有相同的像素格式和相同的宽度。 + +请注意,此过滤器比使用覆盖和填充过滤器创建相同的输出要快。 + +### 参数 + +- inputs 设置输入流的数量。预设值为 2。 +- shortest 如果设置为 1,则在最短输入终止时强制输出终止。预设值为 0。 + +### 示例 + +```python +vtools.vstack_videos(dst, v1, v1) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\v1.mp4 -filter_complex "[0][1]vstack=inputs=2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\videos_hstack.mp4 -y -hide_banner +[17.4009s] +``` + +#### 对比 + +[视频对比链接] + +## w3fdif + +> https://ffmpeg.org/ffmpeg-filters.html#w3fdif + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## waveform + +> https://ffmpeg.org/ffmpeg-filters.html#waveform + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## weave, doubleweave + +> https://ffmpeg.org/ffmpeg-filters.html#doubleweave + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## xbr + +> https://ffmpeg.org/ffmpeg-filters.html#xbr + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## xfade + +> https://ffmpeg.org/ffmpeg-filters.html#xfade + +将交叉淡入淡出从一个输入视频流应用到另一输入视频流。 交叉渐变适用于指定的持续时间。 + +### 参数 + +- transition 指定转场 +- duration 设置淡入淡出持续时间(以秒为单位)。默认持续时间为 1 秒。 +- offset 设置相对于第一个输入流的淡入淡出开始时间(以秒为单位)。默认偏移量为 0。 +- expr 为自定义过渡效果设置表达式。 + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## xmedian + +> https://ffmpeg.org/ffmpeg-filters.html#xmedian + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## xstack + +> https://ffmpeg.org/ffmpeg-filters.html#xstack + +将视频输入堆叠到自定义布局中。所有流必须具有相同的像素格式。请注意,如果输入大小不同,则可能会出现间隙或重叠。 + +### 参数 + +- inputs 设置输入流的数量。预设值为 2。 +- layout 指定输入的布局。此选项要求用户明确设置所需的布局配置。这将设置每个视频输入在输出中的位置。每个输入均以“|”分隔。第一个数字代表列,第二个数字代表行。数字从 0 开始,并以“_”分隔。可以选择使用 wX 和 hX,其中 X 是从中获取宽度或高度的视频输入。以“+”分隔时,可以使用多个值。在这种情况下,将值相加。请注意,如果输入大小不同,可能会出现间隙,因为并非所有输出视频帧都会被填充。同样,如果视频的位置没有为相邻视频的整个帧留出足够的空间,它们也可以彼此重叠。对于 2 个输入,默认布局设置为 0_0 | w0_0。在所有其他情况下,必须由用户设置布局。 +- shortest 如果设置为 1,则在最短输入终止时强制输出终止。预设值为 0。 +- fill 如果设置为有效颜色,则所有未使用的像素将被该颜色填充。默认情况下,填充设置为无,因此将其禁用。 + +### 示例 + +#### 2x2 四格布局 + +``` +input1(0, 0) | input3(w0, 0) +input2(0, h0) | input4(w0, h0) +``` + +```python +vtools.xstack_videos( + media_v0, media_v1, media_v2, media_v0, + dst=dst, layout="0_0|0_h0|w0_0|w0_h0", +) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0][1][2][3]xstack=inputs=4:layout=0_0|0_h0|w0_0|w0_h0:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_xstack.mp4 -y -hide_banner +[0.7751s] +``` + +#### 对比 + +[视频对比链接] + +#### 1x4 四格布局 + +``` +input1(0, 0) +input2(0, h0) +input3(0, h0+h1) +input4(0, h0+h1+h2) +``` + +```python +vtools.xstack_videos( + media_v0, media_v1, media_v2, media_v0, + dst=dst, layout="0_0|0_h0|0_h0+h1|0_h0+h1+h2", +) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i 
testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex "[0][1][2][3]xstack=inputs=4:layout=0_0|0_h0|0_h0+h1|0_h0+h1+h2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_xstack2.mp4 -y -hide_banner +[0.6386s] +``` + +#### 对比 + +[视频对比链接] + +#### 3x3 四格布局 + +``` +input1(0, 0) | input4(w0, 0) | input7(w0+w3, 0) +input2(0, h0) | input5(w0, h0) | input8(w0+w3, h0) +input3(0, h0+h1) | input6(w0, h0+h1) | input9(w0+w3, h0+h1) +``` + +```python +vtools.xstack_videos( + media_v0, media_v1, media_v2, + media_v1, media_v2, media_v0, + media_v2, media_v0, media_v1, + dst=dst, layout="0_0|0_h0|0_h0+h1|w0_0|w0_h0|w0_h0+h1|w0+w3_0|w0+w3_h0|w0+w3_h0+h1", +) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -filter_complex "[0][1][2][3][4][5][6][7][8]xstack=inputs=9:layout=0_0|0_h0|0_h0+h1|w0_0|w0_h0|w0_h0+h1|w0+w3_0|w0+w3_h0|w0+w3_h0+h1:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_xstack3.mp4 -y -hide_banner +[1.4246s] +``` + +#### 对比 + +[视频对比链接] + +#### 4x4 四格布局 + +``` +input1(0, 0) | input5(w0, 0) | input9 (w0+w4, 0) | input13(w0+w4+w8, 0) +input2(0, h0) | input6(w0, h0) | input10(w0+w4, h0) | input14(w0+w4+w8, h0) +input3(0, h0+h1) | input7(w0, h0+h1) | input11(w0+w4, h0+h1) | input15(w0+w4+w8, h0+h1) +input4(0, h0+h1+h2)| input8(w0, h0+h1+h2)| input12(w0+w4, h0+h1+h2)| input16(w0+w4+w8, h0+h1+h2) +``` + +```python +vtools.xstack_videos( + media_v0, media_v1, media_v2, media_v0, + media_v1, media_v2, media_v0, media_v2, + media_v1, media_v2, media_v0, media_v1, + media_v1, media_v2, media_v2, media_v0, + dst=dst, layout="0_0|0_h0|0_h0+h1|0_h0+h1+h2|w0_0|w0_h0|w0_h0+h1|w0_h0+" + "h1+h2|w0+w4_0|w0+w4_h0|w0+w4_h0+h1|w0+w4_h0+h1+h2|w0+w4" + "+w8_0|w0+w4+w8_h0|w0+w4+w8_h0+h1|w0+w4+w8_h0+h1+h2", +) +``` + +``` +ffmpeg -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\1.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\2.mp4 -hwaccel cuda -vcodec h264_cuvid -i testdata\media\0.mp4 -filter_complex 
"[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15]xstack=inputs=16:layout=0_0|0_h0|0_h0+h1|0_h0+h1+h2|w0_0|w0_h0|w0_h0+h1|w0_h0+h1+h2|w0+w4_0|w0+w4_h0|w0+w4_h0+h1|w0+w4_h0+h1+h2|w0+w4+w8_0|w0+w4+w8_h0|w0+w4+w8_h0+h1|w0+w4+w8_h0+h1+h2:shortest=0[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_xstack4.mp4 -y -hide_banner +``` + +#### 对比 + +[视频对比链接] + +#### 2x2 四格布局 + +``` + +``` + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## yadif + +> https://ffmpeg.org/ffmpeg-filters.html#yadif + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## yadif_cuda + +> https://ffmpeg.org/ffmpeg-filters.html#yadif_cuda + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## yaepblur + +> https://ffmpeg.org/ffmpeg-filters.html#yaepblur + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] + +## zoompan + +> https://ffmpeg.org/ffmpeg-filters.html#zoompan + +缩放平移效果。 + +### 参数 + +- zoom, z 设置缩放表达式。范围是 1-10。默认值为 1。 +- x +- y 设置 x 和 y 表达式。默认值为 0。 +- d 以帧数设置持续时间表达式。设置单个输入图像将持续多少帧效果。 +- s 设置输出图像的大小,默认为“hd720”。 +- fps 设置输出帧频,默认为“25”。 + +每个表达式可以包含以下常量: + +- in_w, iw +- in_h, ih 输入视频的宽度和高度 +- out_w, ow +- out_h, oh 输出的宽度和高度,即宽度和高度表达式指定的填充区域的大小 +- in 输入帧数 +- on 输出帧数 +- in_time, it 输入时间戳,以秒为单位。如果输入的时间戳未知,则为 NAN。 +- out_time, time, ot 输出时间戳,以秒为单位。 +- x +- y 根据当前输入框的“x”和“y”表达式最后计算的“x”和“y”位置。 +- px +- py 上一个输入帧的最后一个输出帧的“x”和“y”,如果还没有这样的帧(第一个输入帧),则为 0。 +- zoom 上一次从“z”表达式为当前输入帧计算的缩放 +- pzoom 最近计算的上一个输入帧的最后一个输出帧的缩放 +- duration 当前输入帧的输出帧数。根据每个输入帧的“d”表达式计算得出 +- pduration 为上一个输入帧创建的输出帧数 +- a iw / ih +- sar 输入样本宽高比 +- dar 输入显示宽高比 (iw / ih) * sar + +### 示例 + +```python +_ = input(i1).zoompan(z="min(zoom+0.0015,1.5)", + d=700, x="if(gte(zoom,1.5),x,x+1/a)", + y="if(gte(zoom,1.5),y,y+1)").output(dst).run() +``` + +``` +ffmpeg -i testdata\i1.jpg -filter_complex "[0]zoompan=d=700:x=if(gte(zoom\,1.5)\,x\,x+1/a):y=if(gte(zoom\,1.5)\,y\,y+1):z=min(zoom+0.0015\,1.5)[tag0]" -vcodec h264_nvenc -map [tag0] C:\Users\Admin\Videos\transform\v0_zoompan.mp4 -y -hide_banner +[3.4039s] +``` + +#### 对比 + +[视频对比链接] + +## zscale + +> https://ffmpeg.org/ffmpeg-filters.html#zscale + + +### 参数 + + +### 示例 + +```python + +``` + +``` + +``` + +#### 对比 + +[视频对比链接] diff --git a/ffmpeg/__init__.py b/ffmpeg/__init__.py new file mode 100644 index 0000000..56207b4 --- /dev/null +++ b/ffmpeg/__init__.py @@ -0,0 +1,160 @@ +''' +Date: 2021.02.25 14:34:07 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.26 12:23:21 +''' +import subprocess + +from pkgs import color + +from ._ffmpeg import input, input_source, merge_outputs, output +from ._ffplay import (detect_device_available, ffplay_audio, ffplay_video, + run_ffplay) +from ._ffprobe import FFprobe, metadata, run_ffprobe +from ._progress import show_progress +from ._utils import convert_kwargs_to_cmd_line_args +from ._view import view +from .filters import afilters, avfilters, vfilters +from .nodes import FFmpegError +from .tools import atools, avtools, iotools, rtmp, vtools + +__all__ = [ + 'FFmpeg', + 'FFmpegError', + 'FFprobe', + '__version__', + 'afilters', + 'atools', + 'avfilters', + 'avtools', + 'constants', + 'detect_device_available', + 'ffplay_audio', + 'ffplay_video', + 'input', + 'input_source', + 'iotools', + 'merge_outputs', + 'metadata', + 'output', + 'rtmp', + 'run_ffmpeg', + 'run_ffplay', + 'run_ffprobe', + 'show_progress', + 'vfilters', + 'view', + 'vtools', +] + +__version__ = '1.0.0' + + +def run_ffmpeg(option: str = None, stdout=None, check=True, 
**kwargs) -> subprocess.CompletedProcess: + '''Run raw ffmpeg command.''' + args = ['ffmpeg', '-hide_banner'] + + if option: + args.append(f'-{option}') + + args.extend(convert_kwargs_to_cmd_line_args(kwargs)) + + return subprocess.run(args, stdout=stdout, encoding='utf-8', check=check) + + +def _findstr(option, str_: str = None): + stdout = run_ffmpeg(option, stdout=subprocess.PIPE).stdout + + if str_ is None: + print(stdout) + else: + print('\n'.join([line.replace(str_, color.sredf(str_)) + for line in stdout.splitlines() if str_ in line])) + + +class FFmpeg(object): + + @staticmethod + def cuda(): + FFmpeg.hwaccels() + + color.cyanln('Cuda Encoders:') + FFmpeg.codecs(findstr='_nvenc') + + color.cyanln('Cuda Decoders:') + FFmpeg.codecs(findstr='_cuvid') + + @staticmethod + def version(): + run_ffmpeg('version') + + @staticmethod + def formats(findstr: str = None): + _findstr('formats', str_=findstr) + + @staticmethod + def devices(findstr: str = None): + _findstr('devices', str_=findstr) + + @staticmethod + def codecs(findstr: str = None): + ''' + Examples: + FFmpeg.codecs(find='_cuvid') + FFmpeg.codecs(find='_nvenc') + ''' + _findstr('codecs', str_=findstr) + + @staticmethod + def decoders(findstr: str = None): + _findstr('decoders', str_=findstr) + + @staticmethod + def encoders(findstr: str = None): + _findstr('encoders', str_=findstr) + + @staticmethod + def bsfs(): + run_ffmpeg('bsfs') + + @staticmethod + def protocols(findstr: str = None): + _findstr('protocols', str_=findstr) + + @staticmethod + def filters(findstr: str = None): + _findstr('filters', str_=findstr) + + @staticmethod + def pix_fmts(findstr: str = None): + _findstr('pix_fmts', str_=findstr) + + @staticmethod + def layouts(findstr: str = None): + _findstr('layouts', str_=findstr) + + @staticmethod + def colors(findstr: str = None): + _findstr('colors', str_=findstr) + + @staticmethod + def hwaccels(): + run_ffmpeg('hwaccels') + + @staticmethod + def help(filter: str): + run_ffmpeg(help='filter=' + filter) + + @staticmethod + def list_devices(f='dshow', i='dummy'): + run_ffmpeg(check=False, list_devices=True, f=f, i=i) + + @staticmethod + def list_options(f='dshow', i='dummy'): + ''' + Examples: + ffmpeg -list_options true -f dshow -i video='USB2.0 PC CAMERA' + ffmpeg -list_options true -f dshow -i audio='麦克风 (2- USB2.0 MIC)' + ''' + run_ffmpeg(check=False, list_options=True, f=f, i=i) diff --git a/ffmpeg/_dag.py b/ffmpeg/_dag.py new file mode 100644 index 0000000..dc3809a --- /dev/null +++ b/ffmpeg/_dag.py @@ -0,0 +1,156 @@ +''' +Date: 2021.02.26 21:39:59 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.29 15:42:51 +''' +from __future__ import annotations + +from collections import defaultdict +from functools import cached_property +from typing import Dict, List, NamedTuple, Tuple + +__all__ = [ + "DagEdge", + "DagNode", + "Edge", + "get_incoming_edges", + "get_outgoing_edges", + "topological_sort" +] + + +class Edge(NamedTuple): + Node: DagNode + Label: str + Selector: str + + +class DagEdge(NamedTuple): + '''DagNodes are connected by edges. 
An edge + connects two nodes with a label for each side.''' + + DownstreamNode: DagNode # downstream/child node + DownstreamLabel: str # label on the incoming side of the downstream node + UpstreamNode: DagNode # upstream/parent node + UpstreamLabel: str # label on the outgoing side of the upstream node + Selector: str + + +class DagNode(object): + '''Node in a directed-acyclic graph (DAG).''' + + def __init__(self, label: str, incoming_edge_graph: Dict[str, Edge], + node_type: str, args: List, kwargs: Dict): + self._label = label + self._args = list(map(str, args)) if args else [] + self._kwargs = kwargs or {} + self._node_type = node_type + self._incoming_edge_graph = incoming_edge_graph + + def __repr__(self): + return f" {self.detail}" + + @cached_property + def detail(self) -> str: + """Return a full string representation of the node.""" + props = self._args + [f'{k}={self._kwargs[k]}' for k in sorted(self._kwargs)] + if props: + return f'{self.brief}:{",".join(props)}' + else: + return self.brief + + @property + def brief(self) -> str: + """Return a partial/concise representation of the node.""" + return self._label + + @property + def Label(self) -> str: + return self._label + + @property + def Type(self) -> str: + return self._node_type + + @property + def incoming_edge_graph(self) -> Dict[str, Edge]: + return self._incoming_edge_graph + + @cached_property + def incoming_edges(self) -> Tuple[DagEdge]: + """Provides information about all incoming edges that connect to this node.""" + return get_incoming_edges(self, self.incoming_edge_graph) + + def stream(self, label: str = None, selector: str = None) -> DagEdge: + raise NotImplementedError + + # TODO cause recursion, maybe it's not a good idea. It doesn't seem necessary. + # def __eq__(self, other: DagNode): + # return (self.Type, self.detail, self.incoming_edges) == (other.Type, other.detail, other.incoming_edges) + # + # def __hash__(self): + # return hash((self.Type, self.detail)) + + +def get_incoming_edges(node: DagNode, incoming_edge_graph: Dict[str, Edge]) -> Tuple[DagEdge]: + incoming_edges = [] + for label, edge in incoming_edge_graph.items(): + incoming_edges.append(DagEdge(node, label, edge.Node, edge.Label, edge.Selector)) + return tuple(incoming_edges) + + +def get_outgoing_edges(node: DagNode, outgoing_edge_graph: Dict[str, List[Edge]]) -> Tuple[DagEdge]: + outgoing_edges = [] + for label, edges in outgoing_edge_graph.items(): + for edge in edges: + outgoing_edges.append(DagEdge(edge.Node, edge.Label, node, label, edge.Selector)) + return tuple(outgoing_edges) + + +def topological_sort(nodes: List[DagNode]) -> Tuple[Tuple[DagNode], Dict[DagNode, Dict[str, List[Edge]]]]: + '''NOTE nodes can be part of the nodes, but not all. + + DagNodes may have any number of incoming edges and any number of + outgoing edges. DagNodes keep track only of their incoming edges, but + the entire graph structure can be inferred by looking at the furthest + downstream nodes and working backwards. + ''' + outgoing_edge_graphs = defaultdict(lambda: defaultdict(list)) + dependent_count = defaultdict(lambda: 0) + outgoing_graph = defaultdict(list) + visited_nodes = set() + sorted_nodes = [] + + # Convert to a friendly representation + while nodes: + node = nodes.pop() + if node not in visited_nodes: + for edge in node.incoming_edges: + outgoing_graph[edge.UpstreamNode].append(node) # node == edge.DownstreamNode + outgoing_edge_graphs[edge.UpstreamNode][edge.UpstreamLabel]. 
\ + append(Edge(node, edge.DownstreamLabel, edge.Selector)) + nodes.append(edge.UpstreamNode) + visited_nodes.add(node) + + # Calculate the number of dependents + for val in outgoing_graph.values(): + for v in val: + dependent_count[v] += 1 + + # Zero dependent nodes + stack = [node for node in visited_nodes if dependent_count[node] == 0] + + while stack: + node = stack.pop() + sorted_nodes.append(node) + + for n in outgoing_graph[node]: + dependent_count[n] -= 1 + if dependent_count[n] == 0: + stack.append(n) + + if len(sorted_nodes) != len(visited_nodes): + raise RuntimeError('This graph is not a DAG') + + return tuple(sorted_nodes), outgoing_edge_graphs diff --git a/ffmpeg/_ffmpeg.py b/ffmpeg/_ffmpeg.py new file mode 100644 index 0000000..0c60274 --- /dev/null +++ b/ffmpeg/_ffmpeg.py @@ -0,0 +1,120 @@ +''' +Date: 2021.02.25 15:02:34 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.30 09:30:21 +''' +from pathlib import Path + +from . import constants, settings +from ._node import Stream +from ._utils import (convert_kwargs_string, drop_empty_dict_values, + drop_empty_list_values) +from .expression import generate_resolution +from .nodes import (FilterableStream, InputNode, MergeOutputsNode, OutputNode, + OutputStream, filterable) + + +# http://ffmpeg.org/ffmpeg-all.html + + +def input(source, video_device: str = None, audio_device: str = None, format: str = None, + pixel_format=None, fps: int = None, start_position: float = None, duration: float = None, + to_position: float = None, start_position_eof: float = None, stream_loop: int = None, + frame_rate: int = None, width: int = None, height: int = None, hwaccel: str = None, + vcodec: str = None, enable_cuda=True, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg.html#Main-options""" + + if video_device: + kwargs['source'] = "video=" + video_device + elif audio_device: + kwargs['source'] = "audio=" + audio_device + elif source is None: + raise ValueError("Must specify an input source") + + kwargs['source'] = str(source) + + if settings.CUDA_ENABLE and enable_cuda and Path(source).suffix not in constants.IMAGE_FORMATS: + kwargs['hwaccel'] = "cuda" + kwargs['vcodec'] = settings.DEFAULT_DECODER + + kwargs = drop_empty_dict_values(kwargs, hwaccel=hwaccel, vcodec=vcodec, + f=format, pix_fmt=pixel_format, ss=start_position, + t=duration, to=to_position, sseof=start_position_eof, + stream_loop=stream_loop, r=fps, framerate=frame_rate, + s=generate_resolution(width, height)) + + return InputNode(args=None, kwargs=kwargs).stream() + + +def input_source(source: str, color: str = None, level: int = None, + size: str = None, rate: int = None, sar: str = None, + duration: float = None, decimals: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#Video-Sources""" + if source not in constants.VIDEO_SOURCES: + raise ValueError("Here is currently available video sources: %s" % constants.VIDEO_SOURCES) + + args = convert_kwargs_string(color=color, level=level, size=size, rate=rate, + sar=sar, duration=duration, decimals=decimals) + + if args: + source = f"{source}={args}" + + return input(source, format="lavfi", enable_cuda=False) + + +@filterable() +def output(*streams_or_source, vn=False, an=False, ar=None, ab=None, ac=None, + acodec=None, vcodec=None, codec: str = None, aq_scale=None, vq_scale=None, + aspect=None, fps=None, format=None, pixel_format=None, video_bitrate=None, + audio_bitrate=None, v_profile=None, preset=None, mov_flags=None, + shortest=False, frame_size=None, v_frames: int = None, 
start_position: float = None, + duration: float = None, video_filter: str = None, audio_filter: str = None, + ignore_output=False, preview: bool = False, enable_cuda=True, + args: list = None, **kwargs) -> OutputStream: + if args is None: + args = [] + + args = drop_empty_list_values(args, vn=vn, an=an, shortest=shortest) + + if ignore_output: + kwargs['source'] = "NUL" if constants.WINDOWS else "/dev/null" + format = "null" + + if preview: + kwargs['source'] = "preview" + format = "sdl" + + streams_or_source = list(streams_or_source) + if not kwargs.get('source'): + if not isinstance(streams_or_source[-1], (str, Path)): + raise ValueError("Must specify an output source") + kwargs['source'] = str(streams_or_source.pop(-1)) + + streams = streams_or_source + + if settings.CUDA_ENABLE and enable_cuda and not preview and \ + Path(kwargs['source']).suffix not in constants.IMAGE_FORMATS: + kwargs['vcodec'] = settings.DEFAULT_ENCODER + + if video_bitrate is not None: + kwargs['b:v'] = video_bitrate + + if audio_bitrate is not None: + kwargs['b:a'] = audio_bitrate + + if v_profile is not None: + kwargs['profile:v'] = v_profile + + kwargs = drop_empty_dict_values(kwargs, r=fps, ss=start_position, t=duration, + aspect=aspect, f=format, pix_fmt=pixel_format, ar=ar, + ab=ab, ac=ac, codec=codec, acodec=acodec, vcodec=vcodec, + aq=aq_scale, vq=vq_scale, s=frame_size, vframes=v_frames, + preset=preset, movflags=mov_flags, vf=video_filter, af=audio_filter) + + return OutputNode(streams, args=args, kwargs=kwargs).stream() + + +def merge_outputs(*streams: Stream) -> OutputStream: + """Include all given outputs in one ffmpeg command line.""" + return MergeOutputsNode(streams).stream() diff --git a/ffmpeg/_ffplay.py b/ffmpeg/_ffplay.py new file mode 100644 index 0000000..c7895fb --- /dev/null +++ b/ffmpeg/_ffplay.py @@ -0,0 +1,79 @@ +''' +Date: 2021.03.06 23:06:21 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 13:39:24 +''' +import subprocess +from pathlib import Path + +from pkgs import color + +from ._utils import join_cmd_args_seq, convert_kwargs_to_cmd_line_args + +__all__ = [ + "detect_device_available", + "ffplay_audio", + "ffplay_video", + "run_ffplay", +] + + +def run_ffplay(source: str = None, direct_print=True, **kwargs): + """Run raw ffplay command.""" + args = ["ffplay", "-hide_banner"] + + _kwargs = {} + for k, v in kwargs.items(): + if v is True: + args.append(f"-{k}") + elif v: + _kwargs[k] = v + + args.extend(convert_kwargs_to_cmd_line_args(_kwargs, sort=False)) + + if source is not None: + args.append(Path(source).as_posix()) + + if direct_print: + color.greenln(join_cmd_args_seq(args)) + + return subprocess.Popen(args) + + +def ffplay_audio(source: str, f: str = None, channels: int = None, ar: int = None, + ss: float = None, t: float = None, loop: int = None, vf: str = None): + """ + Examples: + ffplay song.pcm -f s16le -channels 2 -ar 4 + '""" + run_ffplay(source, f=f, channels=channels, ar=ar, ss=ss, t=t, loop=loop, vf=vf) + + +def ffplay_video(source: str, x: int = None, y: int = None, video_size: str = None, + pixel_format: str = None, fs: bool = False, an: bool = False, + vn: bool = False, sn: bool = False, f: str = None, s: str = None, + sync: str = None, ss: float = None, t: float = None, vf: str = None, + af: str = None, seek_interval: int = None, window_title=None, + show_mode=None, loop: int = None): + """ + Examples: + ffplay -f rawvideo -pixel_format yuv420p -s 480*480 texture.yuv + ffplay -f rawvideo -pixel_format rgb24 -s 480*480 texture.rgb + + ffplay video.mp4 -sync 
audio + ffplay video.mp4 -sync video + ffplay video.mp4 -sync ext + '""" + run_ffplay(source, x=x, y=y, video_size=video_size, pixel_format=pixel_format, + fs=fs, an=an, vn=vn, sn=sn, f=f, s=s, sync=sync, ss=ss, t=t, vf=vf, + af=af, seek_interval=seek_interval, window_title=window_title, + showmode=show_mode, loop=loop) + + +def detect_device_available(source: str, f: str): + """ + Examples: + ffplay -f dshow -i video='USB2.0 PC CAMERA + ffplay -f vfwcap -i 0 + '""" + run_ffplay(source, f=f).wait() diff --git a/ffmpeg/_ffprobe.py b/ffmpeg/_ffprobe.py new file mode 100644 index 0000000..0bafd07 --- /dev/null +++ b/ffmpeg/_ffprobe.py @@ -0,0 +1,134 @@ +''' +Date: 2021.02-25 14:34:07 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 13:40:57 +''' +import json +import subprocess +from pathlib import Path +from typing import Dict, List, Union + +from ._utils import convert_kwargs_to_cmd_line_args, drop_empty_list_values +from .constants import JSON_FORMAT +from .nodes import FFmpegError + +__all__ = [ + 'FFprobe', + 'metadata', + 'run_ffprobe', +] + + +def run_ffprobe(source, *args: List, **kwargs: Dict): + '''https://ffmpeg.org/ffprobe-all.html''' + args = ['ffprobe', '-hide_banner'] + list(args) + convert_kwargs_to_cmd_line_args(kwargs) + [source] + + proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + if proc.returncode != 0: + raise FFmpegError('ffprobe', stdout, stderr) + + if kwargs.get('print_format') == JSON_FORMAT: + return json.loads(stdout) + + return stderr + + +def metadata(filepath, show_format=False, show_streams=False, show_frames=False, + show_packets=False, show_programs=False, print_format=None, + timeout: float = None, **kwargs) -> Union[dict, str]: + if timeout: + kwargs['timeout'] = timeout * 1000 * 1000 # s + + if print_format == JSON_FORMAT: + kwargs['print_format'] = JSON_FORMAT + show_streams = True + + args = drop_empty_list_values([], show_format=show_format, + show_streams=show_streams, show_frames=show_frames, + show_packets=show_packets, show_programs=show_programs) + + return run_ffprobe(filepath, *args, **kwargs) + + +class FFprobe(object): + + def __init__(self, source: Union[str, Path], show_format=False, + show_streams=True, show_frames=False, show_packets=False, + show_programs=False, print_format='json', timeout: float = None, **kwargs): + self._source = source + self._metadata = metadata(source, show_format=show_format, + show_streams=show_streams, show_frames=show_frames, + show_packets=show_packets, show_programs=show_programs, + print_format=print_format, timeout=timeout, **kwargs) + self._streams = self._metadata.get('streams', []) + + if len(self._streams) == 0: + raise ValueError('This media file does not contain any streams.') + + for stream in self._streams: + codec_type = stream.get('codec_type') + if codec_type == 'video': + self.__video = stream + elif codec_type == 'audio': + self.__audio = stream + + @property + def source(self): + return self._source + + @property + def metadata(self): + return self._metadata + + @property + def streams(self): + return self._streams + + @property + def video(self): + return self.__video or {} + + @property + def video_duration(self) -> float: + return float(self.video.get('duration')) or 0 + + @property + def video_scale(self) -> List[int]: + return self.video.get('width') or 0, self.video.get('height') or 0 + + @property + def video_frame_rate(self) -> float: + return eval(self.video.get('r_frame_rate', 30)) + + @property + def 
video_total_frames(self) -> int:
+        '''video_total_frames is the number of frames as indicated
+        in the file metadata - this may not always be accurate.'''
+        return int(self.video.get('nb_frames') or 0) or \
+               int(self.video.get('nb_read_frames') or 0) or \
+               int(self.video_frame_rate * self.video_duration) or 0
+
+    @property
+    def video_tags(self) -> dict:
+        return self.video.get('tags', {})
+
+    @property
+    def video_codec(self) -> str:
+        return self.video_tags.get('ENCODER') or \
+               self.video.get('codec_long_name') or \
+               self.video.get('codec_name')
+
+    @property
+    def audio(self):
+        return self.__audio or {}
+
+    @property
+    def audio_duration(self) -> float:
+        return float(self.audio.get('duration') or 0)
+
+    def __str__(self):
+        return '<FFprobe %s>' % self.source
+
+    def __dict__(self):
+        return self.metadata
diff --git a/ffmpeg/_node.py b/ffmpeg/_node.py
new file mode 100644
index 0000000..db685a1
--- /dev/null
+++ b/ffmpeg/_node.py
@@ -0,0 +1,271 @@
+'''
+Date: 2021.02.25 14:34:07
+Description: Omit
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.29 11:01:29
+'''
+from __future__ import annotations
+
+from typing import Any, Dict, List, Tuple, Type, Union
+
+from ._dag import DagEdge, DagNode, Edge, get_outgoing_edges
+
+__all__ = [
+    'Node',
+    'NodeTypes',
+    'Stream',
+    'format_input_stream_tag',
+    'get_filters_spec',
+    'get_stream_spec_nodes',
+    'streamable',
+]
+
+
+def get_stream_graph(stream_spec: Union[Stream, Dict[Any, Stream],
+                                        List[Stream], Tuple[Stream]]) -> Dict[str, Stream]:
+    stream_graph = dict()
+
+    if isinstance(stream_spec, Dict):
+        stream_graph = stream_spec
+    elif isinstance(stream_spec, (List, Tuple)):
+        stream_graph = dict(enumerate(stream_spec))
+    elif isinstance(stream_spec, Stream):
+        stream_graph = {None: stream_spec}
+
+    return stream_graph
+
+
+def get_stream_graph_nodes(stream_graph: Dict[str, Stream]) -> List[DagNode]:
+    nodes = []
+
+    for stream in stream_graph.values():
+        if not isinstance(stream, Stream):
+            raise TypeError(f"Expected a Stream; got {type(stream)}")
+
+        nodes.append(stream.Node)
+
+    return nodes
+
+
+def get_stream_spec_nodes(stream_spec: Any) -> List[DagNode]:
+    return get_stream_graph_nodes(get_stream_graph(stream_spec))
+
+
+def _get_types_repr(types: Tuple[Type]) -> str:
+    if not isinstance(types, (List, Tuple)):
+        types = [types]
+
+    return ', '.join(['{}.{}'.format(x.__module__, x.__name__) for x in types])
+
+
+class Stream(object):
+    """Represents the outgoing edge of an upstream node;
+    may be used to create more downstream nodes."""
+
+    def __init__(self, upstream_node: DagNode, upstream_label: str, node_types=None, selector=None):
+        if node_types and not isinstance(upstream_node, node_types):
+            raise TypeError('Expected upstream node to be of one of the following type(s): '
+                            '{}; got {}'.format(_get_types_repr(node_types), type(upstream_node)))
+
+        self._node = upstream_node
+        self._label = upstream_label
+        self._selector = selector
+
+    @property
+    def Edge(self) -> Edge:
+        return Edge(self.Node, self.Label, self.Selector)
+
+    @property
+    def Node(self):
+        return self._node
+
+    @property
+    def Label(self):
+        return self._label
+
+    @property
+    def Selector(self):
+        return self._selector
+
+    @property
+    def audio(self) -> Stream:
+        '''Select the audio-portion of a stream.'''
+        return self['a']
+
+    @property
+    def video(self) -> Stream:
+        '''Select the video-portion of a stream.'''
+        return self['v']
+
+    def view(self, save_path=None, detail=False, show_labels=True, pipe=False, **kwargs):
+        raise NotImplementedError
+
+    def __repr__(self):
+        node = 
self._node.__repr__() + selector = f':{self._selector}' if self._selector else '' + return f'{node}[{self.Label!r}{selector}]' + + def __getitem__(self, index: str) -> Stream: + '''Select a component (audio, video) of the stream.''' + if self._selector is not None: + raise ValueError(f'Stream already has a selector: {self._selector}') + elif not isinstance(index, str): + raise TypeError(f"Expected string index (e.g. 'a'); got {index!r}") + return self._node.stream(label=self._label, selector=index) + + +class NodeTypes(object): + Base = 'Base' + Input = 'Input' + Filter = 'Filter' + Global = 'Global' + Output = 'Output' + + # special filter nodes + Movie = 'movie' + + +class Node(DagNode): + + def __init__(self, label: str, stream_spec, incoming_stream_types: Tuple[Type[Stream]], + outgoing_stream_type: Type[Stream], min_inputs=0, max_inputs=0, + node_type: str = NodeTypes.Base, args: List = None, kwargs: Dict = None): + stream_graph = get_stream_graph(stream_spec) + + self._check_input_len(stream_graph, min_inputs, max_inputs) + self._check_input_types(stream_graph, incoming_stream_types) + + incoming_edge_graph = self._get_incoming_edge_graph(stream_graph) + + super().__init__(label, incoming_edge_graph, node_type, args, kwargs) + + self._outgoing_stream_type = outgoing_stream_type + self._incoming_stream_types = incoming_stream_types + + def stream(self, label: str = None, selector: str = None) -> Stream: + """Create an outgoing stream originating from this node. + + More nodes may be attached onto the outgoing stream. + """ + return self._outgoing_stream_type(self, label, selector=selector) + + def get_filter_spec(self, edges: Tuple[DagEdge] = None) -> str: + if self.Type == NodeTypes.Filter: + raise NotImplementedError + + def get_input_args(self) -> List[str]: + if self.Type == NodeTypes.Input: + raise NotImplementedError + + def get_output_args(self, stream_tag_graph: Dict[Tuple[DagNode, str], str]) -> List[str]: + if self.Type == NodeTypes.Output: + raise NotImplementedError + + def get_global_args(self) -> List[str]: + if self.Type == NodeTypes.Global: + raise NotImplementedError + + def __getitem__(self, index: Union[slice, str]) -> Stream: + """Create an outgoing stream originating from this node; syntactic sugar for ``self.stream(label)``. + It can also be used to apply a selector: e.g. ``node[0:'a']`` returns a stream with label 0 and + selector ``'a'``, which is the same as ``node.stream(label=0, selector='a')``. 
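+        A plain (non-slice) index is shorthand for ``self.stream(label=index)``
+        with no selector.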
+
+        Example:
+            Process the audio and video portions of a stream independently::
+
+                input = ffmpeg.input('audio_video.mp4')
+                audio = input[:'a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
+                video = input[:'v'].hflip()
+        """
+        if isinstance(index, slice):
+            return self.stream(label=index.start, selector=index.stop)
+        else:
+            return self.stream(label=index)
+
+    @classmethod
+    def _get_incoming_edge_graph(cls, stream_graph: Dict[str, Stream]) -> Dict[str, Edge]:
+        incoming_edge_graph = {}
+        for downstream_label, upstream in stream_graph.items():
+            incoming_edge_graph[downstream_label] = upstream.Edge
+        return incoming_edge_graph
+
+    @classmethod
+    def _check_input_len(cls, stream_graph, min_inputs: int, max_inputs: int):
+        if min_inputs and len(stream_graph) < min_inputs:
+            raise ValueError(f'Expected at least {min_inputs} input stream(s); got {len(stream_graph)}')
+        elif max_inputs and len(stream_graph) > max_inputs:
+            raise ValueError(f'Expected at most {max_inputs} input stream(s); got {len(stream_graph)}')
+
+    @classmethod
+    def _check_input_types(cls, stream_graph, incoming_stream_types):
+        for stream in stream_graph.values():
+            if isinstance(stream, incoming_stream_types):
+                continue
+            raise TypeError('Expected incoming stream(s) to be of one of the following types:'
+                            ' {}; got {}'.format(_get_types_repr(incoming_stream_types), type(stream)))
+
+
+def streamable(stream: Type[Stream] = Stream):
+    """Decorator helper: attach the decorated function to ``stream`` as a method."""
+    def wrapper(func):
+        setattr(stream, func.__name__, func)
+        return func
+
+    return wrapper
+
+
+def format_input_stream_tag(stream_tag_graph: Dict[Tuple[DagNode, str], str],
+                            edge: DagEdge, is_final=False) -> str:
+    prefix = stream_tag_graph[edge.UpstreamNode, edge.UpstreamLabel]
+    suffix = f':{edge.Selector}' if edge.Selector else ''
+
+    if is_final and edge.UpstreamNode.Type == NodeTypes.Input:
+        _format = f'{prefix}{suffix}'  # Special case: `-map` args should not have brackets for input nodes.
+    # The `movie` source filter is special-cased below: an edge into a movie node
+    # gets no tag at all, while an edge out of one is additionally prefixed with
+    # the main input's [0] tag (see the FIXME).
+ elif edge.DownstreamNode.Label == NodeTypes.Movie: + _format = '' # special filter + elif edge.UpstreamNode.Label == NodeTypes.Movie: # FIXME + _format = f'[0][{prefix}{suffix}]' + else: + _format = f'[{prefix}{suffix}]' + + return _format + + +def format_output_stream_tag(stream_tag_graph: Dict[Tuple[DagNode, str], str], edge: DagEdge) -> str: + return f'[{stream_tag_graph[edge.UpstreamNode, edge.UpstreamLabel]}]' + + +def get_filter_spec(node: Node, outgoing_edge_graph: Dict[str, List[Edge]], + stream_tag_graph: Dict[Tuple[DagNode, str], str]) -> str: + incoming_edges = node.incoming_edges + outgoing_edges = get_outgoing_edges(node, outgoing_edge_graph) + + inputs = [format_input_stream_tag(stream_tag_graph, edge) for edge in incoming_edges] + outputs = [format_output_stream_tag(stream_tag_graph, edge) for edge in outgoing_edges] + + return f"{''.join(inputs)}{node.get_filter_spec(outgoing_edges)}{''.join(outputs)}" + + +def allocate_filter_stream_tags(filter_nodes: List[Node], + stream_tag_graph: Dict[Tuple[DagNode, str], str], + outgoing_edge_graphs: Dict[DagNode, Dict[str, List[Edge]]]): + current_serial_number = 0 + + for upstream_node in filter_nodes: + outgoing_edge_graph = outgoing_edge_graphs[upstream_node] + for upstream_label, downstreams in outgoing_edge_graph.items(): + if len(downstreams) > 1: + raise ValueError(f"Encountered {upstream_node} with multiple outgoing edges " + f"with same upstream label {upstream_label!r}; a `split` filter is probably required") + + stream_tag_graph[upstream_node, upstream_label] = f'tag{current_serial_number}' + current_serial_number += 1 + + +def get_filters_spec(filter_nodes: List[Node], + stream_tag_graph: Dict[Tuple[DagNode, str], str], + outgoing_edge_graphs: Dict[DagNode, Dict[str, List[Edge]]]) -> str: + allocate_filter_stream_tags(filter_nodes, stream_tag_graph, outgoing_edge_graphs) + return ';'.join( + get_filter_spec(node, outgoing_edge_graphs[node], stream_tag_graph) + for node in filter_nodes + ) diff --git a/ffmpeg/_progress.py b/ffmpeg/_progress.py new file mode 100644 index 0000000..e5552f5 --- /dev/null +++ b/ffmpeg/_progress.py @@ -0,0 +1,107 @@ +''' +Date: 2021.03.01 13:25:12 +LastEditors: Rustle Karl +LastEditTime: 2021.03.01 19:46:40 +''' +import contextlib +import os +import shutil +import socket +import tempfile +from threading import Thread + +from tqdm import tqdm + +""" +Process video and report and show progress bar. + +This is an example of using the ffmpeg `-progress` option with a +unix-domain socket to report progress in the form of a progress bar. + +The video processing simply consists of converting the video to +sepia colors, but the same pattern can be applied to other use cases. 
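+
+Note that this example relies on unix-domain sockets: ``show_progress`` below
+explicitly raises OSError when run on a non-Linux platform.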
+""" + + +@contextlib.contextmanager +def open_temporary_directory() -> str: + temporary_directory = tempfile.mkdtemp() + try: + yield temporary_directory + finally: + shutil.rmtree(temporary_directory) + + +def accept(s: socket.socket, handler): + """Read progress events from a unix-domain socket.""" + conn, _ = s.accept() + + buffer = b"" + previous = 0 + + while more := conn.recv(16): + buffer += more + lines = buffer.splitlines() + + for line in lines[:-1]: + parts = line.decode().split("=") + if len(parts) < 2: + continue + + k, v = parts[:2] + if v == "continueframe": + k = v + frame = int(parts[2]) + v = frame - previous + previous = frame + + handler(k, v) + + buffer = lines[-1] + + conn.close() + + +@contextlib.contextmanager +def watch_progress(handler): + """Context manager for creating a unix-domain socket and listen for + ffmpeg progress events. + + The socket domain is yielded from the context manager and the + socket is closed when the context manager is exited. + + Args: + handler: a function to be called when progress events are + received; receives a ``key`` argument and ``value`` argument. + """ + with open_temporary_directory() as temporary_d: + unix_sock = os.path.join(temporary_d, "sock") + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + with contextlib.closing(s): + s.bind(unix_sock) + s.listen(1) + Thread(target=accept, args=(s, handler)).start() + + yield unix_sock + + +@contextlib.contextmanager +def show_progress(total_frames): + """Create a unix-domain socket to watch progress and + render tqdm progress bar.""" + import sys + + if sys.platform != "linux": + raise OSError("Only supports Linux platform") + + with tqdm(total=total_frames, desc="Processing", unit="f") as bar: + + def handler(key, value): + if key == "continueframe": + bar.update(value) + elif key == "progress" and value == "end": + bar.update(bar.total - bar.n) + + with watch_progress(handler) as unix_sock: + yield unix_sock diff --git a/ffmpeg/_utils.py b/ffmpeg/_utils.py new file mode 100644 index 0000000..c3e817b --- /dev/null +++ b/ffmpeg/_utils.py @@ -0,0 +1,105 @@ +''' +Date: 2021.02.25 14:34:07 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.29 15:41:41 +''' +from typing import Dict, Iterable, List + +_empty_symbols = (None, '', [], {}) # exclude 0 +_filter_symbols = {"-filter_complex", "-vf", "-af", "-lavfi"} +_backslash = '\\' + + +def drop_empty_dict_values(already: Dict, **kwargs) -> Dict: + overlay = {k: v for k, v in kwargs.items() if v not in _empty_symbols} + return {**already, **overlay} + + +def drop_empty_list_values(already: list, **kwargs) -> list: + already = list(already) + for k, v in kwargs.items(): + if v: + already.append(f'-{k}') + return already + + +def convert_kwargs_string(**kwargs): + return ':'.join([f'{k}={v}' for k, v in kwargs.items() if v not in _empty_symbols]) + + +def escape(text: str, chars: str) -> str: + """Helper function to escape uncomfortable characters.""" + text = str(text) + chars = list(set(chars)) + + if _backslash in chars: + chars.remove(_backslash) + chars.insert(0, _backslash) + + for char in chars: + text = text.replace(char, _backslash + char) + + return text + + +def convert_kwargs_to_cmd_line_args(kwargs: Dict, sort=True) -> List[str]: + """Helper function to build command line arguments out of dict.""" + args = [] + keys = sorted(kwargs.keys()) if sort else kwargs.keys() + + for key in keys: + v = kwargs[key] + + # list, tuple, map + if isinstance(v, Iterable) and not isinstance(v, str): + for value in v: + 
args.append(f'-{key}') + if value not in _empty_symbols: + args.append(f'{value}') + continue + + args.append(f'-{key}') + + if v not in _empty_symbols: + args.append(f'{v}') + + return args + + +def join_cmd_args_seq(args: List[str]) -> str: + cmd_args_seq = list(args) + + for i in range(len(cmd_args_seq)): + if cmd_args_seq[i] in _filter_symbols: + cmd_args_seq[i + 1] = f'"{cmd_args_seq[i + 1]}"' + break + + return " ".join(cmd_args_seq) + + +def string_to_seconds(clock: str) -> int: + if isinstance(clock, (int, float)): + return clock + + hours, minutes, seconds = [int(c) for c in clock.split(":")] + return hours * 60 * 60 + minutes * 60 + seconds + + +def seconds_to_string(seconds: float) -> str: + if isinstance(seconds, str): + return seconds + + hours = seconds // (60 * 60) + minutes = seconds % (60 * 60) // 60 + seconds -= hours * 60 * 60 + minutes * 60 + return f"{hours:02.0f}:{minutes:02.0f}:{seconds:02.03f}" + + +if __name__ == "__main__": + assert escape('a:b', ':') == 'a\:b' + assert escape('a\\:b', ':\\') == 'a\\\\\\:b' + assert (escape('a:b,c[d]e%{}f\'g\'h\\i', '\\\':,[]%') == 'a\\:b\\,c\\[d\\]e\\%{}f\\\'g\\\'h\\\\i') + assert escape(123, ':\\') == '123' + + assert seconds_to_string(345.4246) == '00:05:45.425' diff --git a/ffmpeg/_view.py b/ffmpeg/_view.py new file mode 100644 index 0000000..6e8556a --- /dev/null +++ b/ffmpeg/_view.py @@ -0,0 +1,86 @@ +''' +Date: 2021.02-25 14:34:07 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 14:03:16 +''' +import tempfile + +import graphviz + +from ._dag import get_outgoing_edges, topological_sort +from ._node import get_stream_spec_nodes, Node, streamable +from .nodes import FilterNode, InputNode, OutputNode + +__all__ = ['view'] + +_RIGHT_ARROW = '\u2192' + + +def _get_node_color(node: Node): + color = None + + if isinstance(node, InputNode): + color = '#99CC00' + elif isinstance(node, OutputNode): + color = '#99CCFF' + elif isinstance(node, FilterNode): + color = '#FFCC00' + + return color + + +@streamable() +def view(stream_spec, save_path=None, detail=False, show_labels=True, pipe=False): + if pipe and save_path is not None: + raise ValueError("Can\'t specify both `source` and `pipe`") + elif not pipe and save_path is None: + save_path = tempfile.mktemp() + + nodes = get_stream_spec_nodes(stream_spec) + sorted_nodes, outgoing_edge_graphs = topological_sort(nodes) + + graph = graphviz.Digraph(format='png') + graph.attr(rankdir='LR') + + for node in sorted_nodes: + color = _get_node_color(node) + + if detail: + label = node.detail + else: + label = node.brief + + graph.node(str(hash(node)), label, shape='box', style='filled', fillcolor=color) + + outgoing_edge_graph = outgoing_edge_graphs.get(node, {}) + for edge in get_outgoing_edges(node, outgoing_edge_graph): + kwargs = {} + upstream_label = edge.UpstreamLabel + downstream_label = edge.DownstreamLabel + selector = edge.Selector + + if show_labels and (upstream_label or downstream_label or selector): + if upstream_label is None: + upstream_label = '' + if selector is not None: + upstream_label += ":" + selector + if downstream_label is None: + downstream_label = '' + if upstream_label != '' and downstream_label != '': + middle = ' {} '.format(_RIGHT_ARROW) + else: + middle = '' + + kwargs['label'] = '{} {} {}'.format(upstream_label, middle, downstream_label) + + upstream_node_id = str(hash(edge.UpstreamNode)) + downstream_node_id = str(hash(edge.DownstreamNode)) + + graph.edge(upstream_node_id, downstream_node_id, **kwargs) + + if pipe: + return graph.pipe() + + 
graph.view(save_path, cleanup=True) + + return stream_spec diff --git a/ffmpeg/constants.py b/ffmpeg/constants.py new file mode 100644 index 0000000..e10fee8 --- /dev/null +++ b/ffmpeg/constants.py @@ -0,0 +1,60 @@ +''' +Date: 2021.02-24 14:58:57 +LastEditors: Rustle Karl +LastEditTime: 2021.03.20 10:11:32 +''' +import sys + +LINUX = sys.platform == 'linux' +WINDOWS = sys.platform == 'win32' + +# Video Source +VIDEO_SOURCES = { + 'allrgb', 'allyuv', 'color', 'haldclutsrc', 'nullsrc', + 'pal75bars', 'pal100bars', 'rgbtestsrc', 'smptebars', + 'smptehdbars', 'testsrc', 'testsrc2', 'yuvtestsrc' +} + +# CUDA Encoders +H264_NVENC = 'h264_nvenc' +HEVC_NVENC = 'hevc_nvenc' + +# CUDA Decoders +H264_CUVID = 'h264_cuvid' +HEVC_CUVID = 'hevc_cuvid' +MJPEG_CUVID = 'mjpeg_cuvid' +MPEG1_CUVID = 'mpeg1_cuvid' +MPEG2_CUVID = 'mpeg2_cuvid' +MPEG4_CUVID = 'mpeg4_cuvid' +VC1_CUVID = 'vc1_cuvid' +VP8_CUVID = 'vp8_cuvid' +VP9_CUVID = 'vp9_cuvid' + +# Expression +REAL_TIME = '%{localtime:%Y-%m-%d %H-%M-%S}' + +# Format +COPY = 'copy' +RAW_VIDEO = 'rawvideo' +S16LE = 's16le' + +# Pixel Format +RGB24 = 'rgb24' +PCM_S16LE = 'pcm_s16le' + +# PTS +PTS_STARTPTS = 'PTS-STARTPTS' + +# Input/Output +PIPE = 'pipe:' + +# Resolution +HD = HD720 = '1280x720' +FHD = HD1080 = '1920x1080' +QHD = HD2K = HD1440 = '2560x1440' +UHD = HD4K = HD2160 = '3840x2160' + +# Image Formats +IMAGE_FORMATS = {'.bmp', '.gif', '.heif', '.jpeg', '.jpg', '.png', '.raw', '.tiff'} + +JSON_FORMAT = 'json' diff --git a/ffmpeg/expression/__init__.py b/ffmpeg/expression/__init__.py new file mode 100644 index 0000000..e41c7ed --- /dev/null +++ b/ffmpeg/expression/__init__.py @@ -0,0 +1,19 @@ +''' +Date: 2021.04.29 22:31 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.30 09:31:00 +''' +import contextlib + +from .layout import generate_gird_layout + +__all__ = [ + 'generate_gird_layout', + 'generate_resolution', +] + + +def generate_resolution(width, height) -> str: + with contextlib.suppress(Exception): + return f"{int(width)}x{int(height)}" diff --git a/ffmpeg/expression/layout.py b/ffmpeg/expression/layout.py new file mode 100644 index 0000000..d8e9e9b --- /dev/null +++ b/ffmpeg/expression/layout.py @@ -0,0 +1,31 @@ +''' +Date: 2021.04.29 22:31 +Description: Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.29 22:31 +''' + +# column x row +GIRD_1x4 = '0_0|0_h0|0_h0+h1|0_h0+h1+h2' +GIRD_2x2 = '0_0|0_h0|w0_0|w0_h0' +GIRD_3x3 = '0_0|0_h0|0_h0+h1|w0_0|w0_h0|w0_h0+h1|w0+w3_0|w0+w3_h0|w0+w3_h0+h1' +GIRD_4x4 = '0_0|0_h0|0_h0+h1|0_h0+h1+h2|w0_0|w0_h0|w0_h0+h1|w0_h0+h1+h2|' \ + 'w0+w4_0|w0+w4_h0|w0+w4_h0+h1|w0+w4_h0+h1+h2|w0+w4+w8_0|' \ + 'w0+w4+w8_h0|w0+w4+w8_h0+h1|w0+w4+w8_h0+h1+h2' + + +def generate_gird_layout(column: int, row: int) -> str: + layout = [] + + for position in range(column * row): + _column, _row = divmod(position, row) + co = ['w%d' % (i * column) for i in range(_column)] or ['0'] + ro = ['h%d' % i for i in range(_row)] or ['0'] + layout.append(f"{'+'.join(co)}_{'+'.join(ro)}") + + return '|'.join(layout) + + +if __name__ == '__main__': + print(generate_gird_layout(1, 4)) + print(generate_gird_layout(4, 40)) diff --git a/ffmpeg/filters/__init__.py b/ffmpeg/filters/__init__.py new file mode 100644 index 0000000..3a2bd48 --- /dev/null +++ b/ffmpeg/filters/__init__.py @@ -0,0 +1,13 @@ +''' +Date: 2021.04.25 9:36 +Description : Omit +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 9:36 +''' +from . 
import afilters, vfilters, avfilters + +__all__ = [ + 'afilters', + 'avfilters', + 'vfilters', +] diff --git a/ffmpeg/filters/afilters.py b/ffmpeg/filters/afilters.py new file mode 100644 index 0000000..6e555c0 --- /dev/null +++ b/ffmpeg/filters/afilters.py @@ -0,0 +1,729 @@ +''' +Date: 2021.02-28 22:29:00 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 13:31:49 +''' +from ..nodes import FilterableStream, Stream, filterable +from .avfilters import filter + +__all__ = [] + +"""Audio Filters + +https://ffmpeg.org/ffmpeg-filters.html#Audio-Filters +""" + + +@filterable() +def acompressor(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acompressor""" + return filter(stream, acompressor.__name__, *args, **kwargs) + + +@filterable() +def acontrast(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acontrast""" + return filter(stream, acontrast.__name__, *args, **kwargs) + + +@filterable() +def acopy(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acopy""" + return filter(stream, acopy.__name__, *args, **kwargs) + + +@filterable() +def acrossfade(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acrossfade""" + return filter(stream, acrossfade.__name__, *args, **kwargs) + + +@filterable() +def acrossover(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acrossover""" + return filter(stream, acrossover.__name__, *args, **kwargs) + + +@filterable() +def acrusher(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acrusher""" + return filter(stream, acrusher.__name__, *args, **kwargs) + + +@filterable() +def acue(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#acue""" + return filter(stream, acue.__name__, *args, **kwargs) + + +@filterable() +def adeclick(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adeclick""" + return filter(stream, adeclick.__name__, *args, **kwargs) + + +@filterable() +def adeclip(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adeclip""" + return filter(stream, adeclip.__name__, *args, **kwargs) + + +@filterable() +def adelay(stream: Stream, delays: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adelay""" + return filter(stream, adelay.__name__, delays=delays) + + +@filterable() +def adenorm(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adenorm""" + return filter(stream, adenorm.__name__, *args, **kwargs) + + +@filterable() +def aintegral(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aintegral""" + return filter(stream, aintegral.__name__, *args, **kwargs) + + +@filterable() +def aecho(stream: Stream, in_gain: int = None, out_gain: int = None, + delays: str = None, decays: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aecho""" + return filter(stream, aecho.__name__, in_gain=in_gain, out_gain=out_gain, delays=delays, decays=decays) + + +@filterable() +def aemphasis(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aemphasis""" + return filter(stream, aemphasis.__name__, *args, **kwargs) + + +@filterable() +def aeval(stream: Stream, 
*args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aeval""" + return filter(stream, aeval.__name__, *args, **kwargs) + + +@filterable() +def aexciter(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aexciter""" + return filter(stream, aexciter.__name__, *args, **kwargs) + + +@filterable() +def afade(stream: Stream, fadein: bool = False, fadeout: bool = False, + start_sample: int = None, nb_samples: int = None, start_time: int = None, + duration: int = None, curve: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afade""" + afade_type = "in" if (fadein and not fadeout) else "out" + return filter(stream, afade.__name__, t=afade_type, ss=start_sample, ns=nb_samples, st=start_time, d=duration, + curve=curve) + + +@filterable() +def afftdn(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afftdn""" + return filter(stream, afftdn.__name__, *args, **kwargs) + + +@filterable() +def afftfilt(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afftfilt""" + return filter(stream, afftfilt.__name__, *args, **kwargs) + + +@filterable() +def afir(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afir""" + return filter(stream, afir.__name__, *args, **kwargs) + + +@filterable() +def aformat(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aformat""" + return filter(stream, aformat.__name__, *args, **kwargs) + + +@filterable() +def afreqshift(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afreqshift""" + return filter(stream, afreqshift.__name__, *args, **kwargs) + + +@filterable() +def agate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#agate""" + return filter(stream, agate.__name__, *args, **kwargs) + + +@filterable() +def aiir(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aiir""" + return filter(stream, aiir.__name__, *args, **kwargs) + + +@filterable() +def alimiter(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#alimiter""" + return filter(stream, alimiter.__name__, *args, **kwargs) + + +@filterable() +def allpass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#allpass""" + return filter(stream, allpass.__name__, *args, **kwargs) + + +@filterable() +def aloop(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aloop""" + return filter(stream, aloop.__name__, *args, **kwargs) + + +@filterable() +def amerge(stream: Stream, inputs: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amerge""" + return filter(stream, amerge.__name__, inputs=inputs) + + +@filterable() +def amix(stream: Stream, inputs: int = None, duration: str = None, + dropout_transition: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amix""" + return filter(stream, amix.__name__, inputs=inputs, duration=duration, dropout_transition=dropout_transition) + + +@filterable() +def amultiply(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amultiply""" + return filter(stream, amultiply.__name__, *args, **kwargs) + + +@filterable() +def 
anequalizer(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#anequalizer"""
+    return filter(stream, anequalizer.__name__, *args, **kwargs)
+
+
+@filterable()
+def anlmdn(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#anlmdn"""
+    return filter(stream, anlmdn.__name__, *args, **kwargs)
+
+
+@filterable()
+def anlms(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#anlms"""
+    return filter(stream, anlms.__name__, *args, **kwargs)
+
+
+@filterable()
+def anull(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#anull"""
+    return filter(stream, anull.__name__, *args, **kwargs)
+
+
+@filterable()
+def apad(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#apad"""
+    return filter(stream, apad.__name__, *args, **kwargs)
+
+
+@filterable()
+def aphaser(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#aphaser"""
+    return filter(stream, aphaser.__name__, *args, **kwargs)
+
+
+@filterable()
+def aphaseshift(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#aphaseshift"""
+    return filter(stream, aphaseshift.__name__, *args, **kwargs)
+
+
+@filterable()
+def apulsator(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#apulsator"""
+    return filter(stream, apulsator.__name__, *args, **kwargs)
+
+
+@filterable()
+def aresample(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#aresample"""
+    # aresample takes resampler options, not amix's parameters; use the
+    # generic pass-through form like its sibling filters.
+    return filter(stream, aresample.__name__, *args, **kwargs)
+
+
+@filterable()
+def areverse(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#areverse"""
+    return filter(stream, areverse.__name__, *args, **kwargs)
+
+
+@filterable()
+def arnndn(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#arnndn"""
+    return filter(stream, arnndn.__name__, *args, **kwargs)
+
+
+@filterable()
+def asetnsamples(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asetnsamples"""
+    return filter(stream, asetnsamples.__name__, *args, **kwargs)
+
+
+@filterable()
+def asetrate(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asetrate"""
+    return filter(stream, asetrate.__name__, *args, **kwargs)
+
+
+@filterable()
+def ashowinfo(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#ashowinfo"""
+    return filter(stream, ashowinfo.__name__, *args, **kwargs)
+
+
+@filterable()
+def asoftclip(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asoftclip"""
+    return filter(stream, asoftclip.__name__, *args, **kwargs)
+
+
+@filterable()
+def asr(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asr"""
+    return filter(stream, asr.__name__, *args, **kwargs)
+
+
+@filterable()
+def astats(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#astats"""
+    return filter(stream, astats.__name__, *args, **kwargs)
+
+
+@filterable()
+def 
asubboost(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asubboost""" + return filter(stream, asubboost.__name__, *args, **kwargs) + + +@filterable() +def asubcut(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asubcut""" + return filter(stream, asubcut.__name__, *args, **kwargs) + + +@filterable() +def asupercut(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asupercut""" + return filter(stream, asupercut.__name__, *args, **kwargs) + + +@filterable() +def asuperpass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asuperpass""" + return filter(stream, asuperpass.__name__, *args, **kwargs) + + +@filterable() +def asuperstop(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asuperstop""" + return filter(stream, asuperstop.__name__, *args, **kwargs) + + +@filterable() +def atempo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atempo""" + return filter(stream, atempo.__name__, *args, **kwargs) + + +@filterable() +def atrim(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atrim""" + return filter(stream, atrim.__name__, *args, **kwargs) + + +@filterable() +def axcorrelate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#axcorrelate""" + return filter(stream, axcorrelate.__name__, *args, **kwargs) + + +@filterable() +def bandpass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bandpass""" + return filter(stream, bandpass.__name__, *args, **kwargs) + + +@filterable() +def bandreject(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bandreject""" + return filter(stream, bandreject.__name__, *args, **kwargs) + + +@filterable() +def lowshelf(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lowshelf""" + return filter(stream, lowshelf.__name__, *args, **kwargs) + + +@filterable() +def biquad(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#biquad""" + return filter(stream, biquad.__name__, *args, **kwargs) + + +@filterable() +def bs2b(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bs2b""" + return filter(stream, bs2b.__name__, *args, **kwargs) + + +@filterable() +def channelmap(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#channelmap""" + return filter(stream, channelmap.__name__, *args, **kwargs) + + +@filterable() +def channelsplit(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#channelsplit""" + return filter(stream, channelsplit.__name__, *args, **kwargs) + + +@filterable() +def chorus(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chorus""" + return filter(stream, chorus.__name__, *args, **kwargs) + + +@filterable() +def compand(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#compand""" + return filter(stream, compand.__name__, *args, **kwargs) + + +@filterable() +def compensationdelay(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#compensationdelay""" + return filter(stream, compensationdelay.__name__, *args, **kwargs) + + +@filterable() +def crossfeed(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#crossfeed""" + return filter(stream, crossfeed.__name__, *args, **kwargs) + + +@filterable() +def crystalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#crystalizer""" + return filter(stream, crystalizer.__name__, *args, **kwargs) + + +@filterable() +def dcshift(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dcshift""" + return filter(stream, dcshift.__name__, *args, **kwargs) + + +@filterable() +def deesser(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#deesser""" + return filter(stream, deesser.__name__, *args, **kwargs) + + +@filterable() +def drmeter(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drmeter""" + return filter(stream, drmeter.__name__, *args, **kwargs) + + +@filterable() +def dynaudnorm(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dynaudnorm""" + return filter(stream, dynaudnorm.__name__, *args, **kwargs) + + +@filterable() +def earwax(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#earwax""" + return filter(stream, earwax.__name__, *args, **kwargs) + + +@filterable() +def equalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#equalizer""" + return filter(stream, equalizer.__name__, *args, **kwargs) + + +@filterable() +def extrastereo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#extrastereo""" + return filter(stream, extrastereo.__name__, *args, **kwargs) + + +@filterable() +def firequalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#firequalizer""" + return filter(stream, firequalizer.__name__, *args, **kwargs) + + +@filterable() +def flanger(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#flanger""" + return filter(stream, flanger.__name__, *args, **kwargs) + + +@filterable() +def haas(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#haas""" + return filter(stream, haas.__name__, *args, **kwargs) + + +@filterable() +def hdcd(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hdcd""" + return filter(stream, hdcd.__name__, *args, **kwargs) + + +@filterable() +def headphone(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#headphone""" + return filter(stream, headphone.__name__, *args, **kwargs) + + +@filterable() +def highpass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#highpass""" + return filter(stream, highpass.__name__, *args, **kwargs) + + +@filterable() +def join(*streams: Stream, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#join""" + return filter(streams, join.__name__, min_inputs=2, **kwargs) + + +@filterable() +def ladspa(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ladspa""" + return filter(stream, ladspa.__name__, 
*args, **kwargs) + + +@filterable() +def loudnorm(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#loudnorm""" + return filter(stream, loudnorm.__name__, *args, **kwargs) + + +@filterable() +def lowpass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lowpass""" + return filter(stream, lowpass.__name__, *args, **kwargs) + + +@filterable() +def lv2(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lv2""" + return filter(stream, lv2.__name__, *args, **kwargs) + + +@filterable() +def mcompand(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mcompand""" + return filter(stream, mcompand.__name__, *args, **kwargs) + + +@filterable() +def pan(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pan""" + return filter(stream, pan.__name__, *args, **kwargs) + + +@filterable() +def replaygain(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#replaygain""" + return filter(stream, replaygain.__name__, *args, **kwargs) + + +@filterable() +def resample(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#resample""" + return filter(stream, resample.__name__, *args, **kwargs) + + +@filterable() +def rubberband(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rubberband""" + return filter(stream, rubberband.__name__, *args, **kwargs) + + +@filterable() +def sidechaincompress(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sidechaincompress""" + return filter(stream, sidechaincompress.__name__, *args, **kwargs) + + +@filterable() +def sidechaingate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sidechaingate""" + return filter(stream, sidechaingate.__name__, *args, **kwargs) + + +@filterable() +def silencedetect(stream: Stream, noise: float, duration: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#silencedetect""" + return filter(stream, silencedetect.__name__, n=f"{noise}dB", d=duration) + + +@filterable() +def silenceremove(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#silenceremove""" + return filter(stream, silenceremove.__name__, *args, **kwargs) + + +@filterable() +def sofalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sofalizer""" + return filter(stream, sofalizer.__name__, *args, **kwargs) + + +@filterable() +def speechnorm(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#speechnorm""" + return filter(stream, speechnorm.__name__, *args, **kwargs) + + +@filterable() +def stereotools(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereotools""" + return filter(stream, stereotools.__name__, *args, **kwargs) + + +@filterable() +def stereowiden(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereowiden""" + return filter(stream, stereowiden.__name__, *args, **kwargs) + + +@filterable() +def superequalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#superequalizer""" + return filter(stream, 
superequalizer.__name__, *args, **kwargs) + + +@filterable() +def surround(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#surround""" + return filter(stream, surround.__name__, *args, **kwargs) + + +@filterable() +def highshelf(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#highshelf""" + return filter(stream, highshelf.__name__, *args, **kwargs) + + +@filterable() +def tremolo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tremolo""" + return filter(stream, tremolo.__name__, *args, **kwargs) + + +@filterable() +def vibrato(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vibrato""" + return filter(stream, vibrato.__name__, *args, **kwargs) + + +@filterable() +def volume(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#volume""" + return filter(stream, volume.__name__, *args, **kwargs) + + +@filterable() +def volumedetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#volumedetect""" + return filter(stream, volumedetect.__name__, *args, **kwargs) + + +"""Audio Sources + +https://ffmpeg.org/ffmpeg-filters.html#Audio-Sources +""" + + +@filterable() +def abuffer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abuffer""" + return filter(stream, abuffer.__name__, *args, **kwargs) + + +@filterable() +def aevalsrc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aevalsrc""" + return filter(stream, aevalsrc.__name__, *args, **kwargs) + + +@filterable() +def afirsrc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afirsrc""" + return filter(stream, afirsrc.__name__, *args, **kwargs) + + +@filterable() +def anullsrc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#anullsrc""" + return filter(stream, anullsrc.__name__, *args, **kwargs) + + +@filterable() +def flite(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#flite""" + return filter(stream, flite.__name__, *args, **kwargs) + + +@filterable() +def anoisesrc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#anoisesrc""" + return filter(stream, anoisesrc.__name__, *args, **kwargs) + + +@filterable() +def hilbert(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hilbert""" + return filter(stream, hilbert.__name__, *args, **kwargs) + + +@filterable() +def sinc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sinc""" + return filter(stream, sinc.__name__, *args, **kwargs) + + +@filterable() +def sine(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sine""" + return filter(stream, sine.__name__, *args, **kwargs) + + +"""Audio Sinks + +https://ffmpeg.org/ffmpeg-filters.html#Audio-Sinks +""" + + +@filterable() +def abuffersink(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abuffersink""" + return filter(stream, abuffersink.__name__, *args, **kwargs) + + +@filterable() +def anullsink(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#anullsink""" + return filter(stream, anullsink.__name__, *args, **kwargs) diff --git a/ffmpeg/filters/avfilters.py b/ffmpeg/filters/avfilters.py new file mode 100644 index 0000000..05efd82 --- /dev/null +++ b/ffmpeg/filters/avfilters.py @@ -0,0 +1,294 @@ +''' +Date: 2021.02-25 14:34:07 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 13:31:36 +''' +import contextlib +from typing import List + +from .._utils import drop_empty_dict_values +from ..nodes import filterable, FilterableStream, FilterNode, Stream + +# https://ffmpeg.org/ffmpeg-filters.html + +__all__ = [ + 'concat', + 'filter', + 'filter_multi_output', +] + + +@filterable() +def filter_multi_output(stream_spec, label: str, *args, **kwargs) -> FilterNode: + max_inputs, min_inputs = 1, 1 + + with contextlib.suppress(KeyError): + max_inputs = kwargs.pop("max_inputs") + + with contextlib.suppress(KeyError): + min_inputs = kwargs.pop("min_inputs") + + return FilterNode( + streams=stream_spec, + label=label, + min_inputs=min_inputs, + max_inputs=max_inputs, + args=args, + kwargs=drop_empty_dict_values({}, **kwargs), + ) + + +@filterable() +def filter(stream, label: str, *args, **kwargs) -> FilterableStream: + return filter_multi_output(stream, label, *args, **kwargs).stream() + + +"""Multimedia Filters + +https://ffmpeg.org/ffmpeg-filters.html#Multimedia-Filters +""" + + +@filterable() +def abitscope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abitscope""" + return filter(stream, abitscope.__name__, *args, **kwargs) + + +@filterable() +def adrawgraph(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adrawgraph""" + return filter(stream, adrawgraph.__name__, *args, **kwargs) + + +@filterable() +def agraphmonitor(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#agraphmonitor""" + return filter(stream, agraphmonitor.__name__, *args, **kwargs) + + +@filterable() +def ahistogram(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ahistogram""" + return filter(stream, ahistogram.__name__, *args, **kwargs) + + +@filterable() +def aphasemeter(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aphasemeter""" + return filter(stream, aphasemeter.__name__, *args, **kwargs) + + +@filterable() +def avectorscope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avectorscope""" + return filter(stream, avectorscope.__name__, *args, **kwargs) + + +@filterable() +def abench(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abench""" + return filter(stream, abench.__name__, *args, **kwargs) + + +@filterable() +def concat(*streams: Stream, v: int = None, a: int = None, n: int = None, + unsafe: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#concat""" + stream_count = 0 + + if isinstance(v, int): + stream_count += v + + if isinstance(a, int): + stream_count += a + + if stream_count == 0: + stream_count = 1 + + if len(streams) % stream_count != 0: + raise ValueError(f'Expected concat input streams to have length multiple ' + f'of {stream_count} (v={v}, a={a}); got {len(streams)}') + + return filter(streams, concat.__name__, min_inputs=2, max_inputs=None, n=n or len(streams) // stream_count, v=v, + a=a, unsafe=unsafe) + + +@filterable() +def 
+@filterable()
+def ebur128(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#ebur128"""
+    return filter(stream, ebur128.__name__, *args, **kwargs)
+
+
+@filterable()
+def ainterleave(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#ainterleave"""
+    return filter(stream, ainterleave.__name__, *args, **kwargs)
+
+
+@filterable()
+def ametadata(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#ametadata"""
+    return filter(stream, ametadata.__name__, *args, **kwargs)
+
+
+@filterable()
+def aperms(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#aperms"""
+    return filter(stream, aperms.__name__, *args, **kwargs)
+
+
+@filterable()
+def arealtime(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#arealtime"""
+    return filter(stream, arealtime.__name__, *args, **kwargs)
+
+
+@filterable()
+def aselect(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#aselect"""
+    return filter(stream, aselect.__name__, *args, **kwargs)
+
+
+@filterable()
+def asendcmd(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asendcmd"""
+    return filter(stream, asendcmd.__name__, *args, **kwargs)
+
+
+@filterable()
+def setpts(stream: Stream, expr: str = "PTS-STARTPTS") -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#setpts"""
+    return filter(stream, setpts.__name__, expr)
+
+
+@filterable()
+def asetpts(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asetpts"""
+    return filter(stream, asetpts.__name__, *args, **kwargs)
+
+
+@filterable()
+def setrange(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#setrange"""
+    return filter(stream, setrange.__name__, *args, **kwargs)
+
+
+@filterable()
+def asettb(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#asettb"""
+    return filter(stream, asettb.__name__, *args, **kwargs)
+
+
+@filterable()
+def select(stream: Stream, expr: str) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#select"""
+    return filter(stream, select.__name__, expr)
+
+
+@filterable()
+def showcqt(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#showcqt"""
+    return filter(stream, showcqt.__name__, *args, **kwargs)
+
+
+@filterable()
+def showfreqs(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#showfreqs"""
+    return filter(stream, showfreqs.__name__, *args, **kwargs)
+
+
+@filterable()
+def showspatial(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#showspatial"""
+    return filter(stream, showspatial.__name__, *args, **kwargs)
+
+
+@filterable()
+def showspectrum(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#showspectrum"""
+    return filter(stream, showspectrum.__name__, *args, **kwargs)
+
+
+@filterable()
+def showspectrumpic(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#showspectrumpic"""
+    return filter(stream, showspectrumpic.__name__, *args, **kwargs)
+
+
+@filterable()
+def showvolume(stream: Stream, *args, **kwargs) -> FilterableStream:
+    
"""https://ffmpeg.org/ffmpeg-filters.html#showvolume""" + return filter(stream, showvolume.__name__, *args, **kwargs) + + +@filterable() +def showwaves(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showwaves""" + return filter(stream, showwaves.__name__, *args, **kwargs) + + +@filterable() +def showwavespic(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showwavespic""" + return filter(stream, showwavespic.__name__, *args, **kwargs) + + +@filterable() +def sidedata(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asidedata""" + return filter(stream, sidedata.__name__, *args, **kwargs) + + +@filterable() +def asidedata(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asidedata""" + return filter(stream, asidedata.__name__, *args, **kwargs) + + +@filterable() +def spectrumsynth(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#spectrumsynth""" + return filter(stream, spectrumsynth.__name__, *args, **kwargs) + + +@filterable() +def split(stream: Stream) -> List[FilterableStream]: + """https://ffmpeg.org/ffmpeg-filters.html#asplit""" + return FilterNode(stream, split.__name__) + + +@filterable() +def asplit(stream: Stream) -> List[FilterableStream]: + """https://ffmpeg.org/ffmpeg-filters.html#asplit""" + return FilterNode(stream, asplit.__name__) + + +@filterable() +def zmq(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#azmq""" + return filter(stream, zmq.__name__, *args, **kwargs) + + +@filterable() +def azmq(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#azmq""" + return filter(stream, azmq.__name__, *args, **kwargs) + + +"""Multimedia Sources + +https://ffmpeg.org/ffmpeg-filters.html#Multimedia-Sources +""" + + +@filterable() +def amovie(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amovie""" + return filter(stream, amovie.__name__, *args, **kwargs) + + +@filterable() +def movie(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#movie""" + return filter(stream, movie.__name__, *args, **kwargs) diff --git a/ffmpeg/filters/vfilters.py b/ffmpeg/filters/vfilters.py new file mode 100644 index 0000000..26eb7d2 --- /dev/null +++ b/ffmpeg/filters/vfilters.py @@ -0,0 +1,1846 @@ +''' +Date: 2021.03.03 08:17:05 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 13:31:29 +''' +from pathlib import Path +from typing import Union + +from .avfilters import filter +from .._utils import drop_empty_dict_values +from ..constants import LINUX +from ..nodes import filterable, FilterableStream, FilterNode, Stream + +__all__ = [ + "gltransition", + "overlay", +] + +"""OpenGL Filters""" + + +@filterable() +def gltransition(*streams: Stream, source: Union[str, Path] = None, + offset: float = 0, duration: float = 0) -> FilterableStream: + """Combine two videos with transition effects. + + Args: + source: Transition effect source file. + offset: Specify the transition effect to start at offset seconds of the first video. + duration: Duration of transition effect. 
+ """ + if not LINUX: + raise NotImplementedError('Only supports Linux system, and FFmpeg must be recompiled') + + return FilterNode(streams, gltransition.__name__, max_inputs=2, + kwargs=drop_empty_dict_values({}, source=source, offset=offset, duration=duration)).stream() + + +"""Video Filters + +https://ffmpeg.org/ffmpeg-filters.html#Video-Filters +""" + + +@filterable() +def addroi(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#addroi""" + return filter(stream, addroi.__name__, *args, **kwargs) + + +@filterable() +def alphaextract(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#alphaextract""" + return filter(stream, alphaextract.__name__, *args, **kwargs) + + +@filterable() +def alphamerge(*streams: Stream) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#alphamerge""" + return filter(streams, alphamerge.__name__, max_inputs=2) + + +@filterable() +def amplify(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amplify""" + return filter(stream, amplify.__name__, *args, **kwargs) + + +@filterable() +def ass(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ass""" + return filter(stream, ass.__name__, *args, **kwargs) + + +@filterable() +def atadenoise(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atadenoise""" + return filter(stream, atadenoise.__name__, *args, **kwargs) + + +@filterable() +def avgblur(stream: Stream, x: int = None, y: int = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avgblur""" + return filter(stream, avgblur.__name__, sizeX=x, sizeY=y, planes=planes) + + +@filterable() +def bbox(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bbox""" + return filter(stream, bbox.__name__, *args, **kwargs) + + +@filterable() +def bilateral(stream: Stream, s: float = None, r: float = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bilateral""" + return filter(stream, bilateral.__name__, sigmaS=s, sigmaR=r, planes=planes) + + +@filterable() +def bitplanenoise(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bitplanenoise""" + return filter(stream, bitplanenoise.__name__, *args, **kwargs) + + +@filterable() +def blackdetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blackdetect""" + return filter(stream, blackdetect.__name__, *args, **kwargs) + + +@filterable() +def blackframe(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blackframe""" + return filter(stream, blackframe.__name__, *args, **kwargs) + + +@filterable() +def blend(*stream: Stream, all_mode: str = None, all_opacity: float = None, + all_expr: str = None, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blend""" + return filter(stream, blend.__name__, max_inputs=2, all_mode=all_mode, all_opacity=all_opacity, all_expr=all_expr, + **kwargs) + + +@filterable() +def bm3d(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bm3d""" + return filter(stream, bm3d.__name__, *args, **kwargs) + + +@filterable() +def boxblur(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#boxblur""" + return filter(stream, boxblur.__name__, *args, **kwargs) + + +@filterable() +def bwdif(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bwdif""" + return filter(stream, bwdif.__name__, *args, **kwargs) + + +@filterable() +def cas(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cas""" + return filter(stream, cas.__name__, *args, **kwargs) + + +@filterable() +def chromahold(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromahold""" + return filter(stream, chromahold.__name__, *args, **kwargs) + + +@filterable() +def chromakey(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromakey""" + return filter(stream, chromakey.__name__, *args, **kwargs) + + +@filterable() +def chromanr(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromanr""" + return filter(stream, chromanr.__name__, *args, **kwargs) + + +@filterable() +def chromashift(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromashift""" + return filter(stream, chromashift.__name__, *args, **kwargs) + + +@filterable() +def ciescope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ciescope""" + return filter(stream, ciescope.__name__, *args, **kwargs) + + +@filterable() +def codecview(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#codecview""" + return filter(stream, codecview.__name__, *args, **kwargs) + + +@filterable() +def colorbalance(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorbalance""" + return filter(stream, colorbalance.__name__, *args, **kwargs) + + +@filterable() +def colorcontrast(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorcontrast""" + return filter(stream, colorcontrast.__name__, *args, **kwargs) + + +@filterable() +def colorcorrect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorcorrect""" + return filter(stream, colorcorrect.__name__, *args, **kwargs) + + +@filterable() +def colorchannelmixer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorchannelmixer""" + return filter(stream, colorchannelmixer.__name__, *args, **kwargs) + + +@filterable() +def colorize(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorize""" + return filter(stream, colorize.__name__, *args, **kwargs) + + +@filterable() +def colorkey(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorkey""" + return filter(stream, colorkey.__name__, *args, **kwargs) + + +@filterable() +def colorhold(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorhold""" + return filter(stream, colorhold.__name__, *args, **kwargs) + + +@filterable() +def colorlevels(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorlevels""" + return filter(stream, colorlevels.__name__, *args, **kwargs) + + +@filterable() +def colormatrix(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#colormatrix""" + return filter(stream, colormatrix.__name__, *args, **kwargs) + + +@filterable() +def colorspace(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorspace""" + return filter(stream, colorspace.__name__, *args, **kwargs) + + +@filterable() +def colortemperature(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colortemperature""" + return filter(stream, colortemperature.__name__, *args, **kwargs) + + +@filterable() +def convolution(stream: Stream, m0: int = None, m1: int = None, m2: int = None, + m3: int = None, rdiv0: int = None, rdiv1: int = None, rdiv2: int = None, + rdiv3: int = None, bias0: int = None, bias1: int = None, bias2: int = None, + bias3: int = None, mode0: int = None, mode1: int = None, + mode2: int = None, mode3: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolution""" + + kwargs = { + "0m": m0, "1m": m1, "2m": m2, "3m": m3, + "0rdiv": rdiv0, "1rdiv": rdiv1, "2rdiv": rdiv2, "3rdiv": rdiv3, + "0bias": bias0, "1bias": bias1, "2bias": bias2, "3bias": bias3, + "0mode": mode0, "1mode": mode1, "2mode": mode2, "3mode": mode3 + } + + return filter(stream, convolution.__name__, **kwargs) + + +@filterable() +def convolve(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolve""" + return filter(stream, convolve.__name__, *args, **kwargs) + + +@filterable() +def copy(stream: Stream) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#copy""" + return filter(stream, copy.__name__) + + +@filterable() +def coreimage(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#coreimage""" + return filter(stream, coreimage.__name__, *args, **kwargs) + + +@filterable() +def cover_rect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cover_rect""" + return filter(stream, cover_rect.__name__, *args, **kwargs) + + +@filterable() +def crop(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#crop""" + return filter(stream, crop.__name__, *args, **kwargs) + + +@filterable() +def cropdetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cropdetect""" + return filter(stream, cropdetect.__name__, *args, **kwargs) + + +@filterable() +def cue(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cue""" + return filter(stream, cue.__name__, *args, **kwargs) + + +@filterable() +def curves(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#curves""" + return filter(stream, curves.__name__, *args, **kwargs) + + +@filterable() +def datascope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#datascope""" + return filter(stream, datascope.__name__, *args, **kwargs) + + +@filterable() +def dblur(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dblur""" + return filter(stream, dblur.__name__, *args, **kwargs) + + +@filterable() +def dctdnoiz(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dctdnoiz""" + return filter(stream, dctdnoiz.__name__, *args, **kwargs) + + +@filterable() +def deband(stream: Stream, thr1: float = None, thr2: float = None, 
+           thr3: float = None, thr4: float = None, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deband"""
+    kwargs.update({
+        "1thr": thr1, "2thr": thr2,
+        "3thr": thr3, "4thr": thr4,
+    })
+
+    return filter(stream, deband.__name__, **kwargs)
+
+
+@filterable()
+def deblock(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deblock"""
+    return filter(stream, deblock.__name__, *args, **kwargs)
+
+
+@filterable()
+def decimate(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#decimate"""
+    return filter(stream, decimate.__name__, *args, **kwargs)
+
+
+@filterable()
+def deconvolve(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deconvolve"""
+    return filter(stream, deconvolve.__name__, *args, **kwargs)
+
+
+@filterable()
+def dedot(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#dedot"""
+    return filter(stream, dedot.__name__, *args, **kwargs)
+
+
+@filterable()
+def deflate(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deflate"""
+    return filter(stream, deflate.__name__, *args, **kwargs)
+
+
+@filterable()
+def deflicker(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deflicker"""
+    return filter(stream, deflicker.__name__, *args, **kwargs)
+
+
+@filterable()
+def dejudder(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#dejudder"""
+    return filter(stream, dejudder.__name__, *args, **kwargs)
+
+
+@filterable()
+def delogo(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#delogo"""
+    return filter(stream, delogo.__name__, *args, **kwargs)
+
+
+@filterable()
+def derain(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#derain"""
+    return filter(stream, derain.__name__, *args, **kwargs)
+
+
+@filterable()
+def deshake(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#deshake"""
+    return filter(stream, deshake.__name__, *args, **kwargs)
+
+
+@filterable()
+def despill(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#despill"""
+    return filter(stream, despill.__name__, *args, **kwargs)
+
+
+@filterable()
+def detelecine(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#detelecine"""
+    return filter(stream, detelecine.__name__, *args, **kwargs)
+
+
+@filterable()
+def dilation(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#dilation"""
+    return filter(stream, dilation.__name__, *args, **kwargs)
+
+
+@filterable()
+def displace(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#displace"""
+    return filter(stream, displace.__name__, *args, **kwargs)
+
+
+@filterable()
+def dnn_processing(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#dnn_processing"""
+    # `async` is a Python keyword, so callers pass `set_async` instead;
+    # rename the key even when its value is falsy.
+    if "set_async" in kwargs:
+        kwargs["async"] = kwargs.pop("set_async")
+    return filter(stream, dnn_processing.__name__, *args, **kwargs)
+
+
+@filterable()
+def drawbox(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#drawbox"""
+    return filter(stream, 
drawbox.__name__, *args, **kwargs) + + +@filterable() +def drawgraph(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawgraph""" + return filter(stream, drawgraph.__name__, *args, **kwargs) + + +@filterable() +def drawgrid(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawgrid""" + return filter(stream, drawgrid.__name__, *args, **kwargs) + + +@filterable() +def drawtext(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawtext""" + return filter(stream, drawtext.__name__, *args, **kwargs) + + +@filterable() +def edgedetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#edgedetect""" + return filter(stream, edgedetect.__name__, *args, **kwargs) + + +@filterable() +def elbg(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#elbg""" + return filter(stream, elbg.__name__, *args, **kwargs) + + +@filterable() +def entropy(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#entropy""" + return filter(stream, entropy.__name__, *args, **kwargs) + + +@filterable() +def epx(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#epx""" + return filter(stream, epx.__name__, *args, **kwargs) + + +@filterable() +def eq(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#eq""" + return filter(stream, eq.__name__, *args, **kwargs) + + +@filterable() +def erosion(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#erosion""" + return filter(stream, erosion.__name__, *args, **kwargs) + + +@filterable() +def estdif(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#estdif""" + return filter(stream, estdif.__name__, *args, **kwargs) + + +@filterable() +def exposure(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#exposure""" + return filter(stream, exposure.__name__, *args, **kwargs) + + +@filterable() +def extractplanes(stream: Stream, planes: str = None) -> FilterNode: + """https://ffmpeg.org/ffmpeg-filters.html#extractplanes""" + return FilterNode(stream, extractplanes.__name__, args=[planes]) + + +@filterable() +def fade(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fade""" + return filter(stream, fade.__name__, *args, **kwargs) + + +@filterable() +def fftdnoiz(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fftdnoiz""" + return filter(stream, fftdnoiz.__name__, *args, **kwargs) + + +@filterable() +def fftfilt(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fftfilt""" + return filter(stream, fftfilt.__name__, *args, **kwargs) + + +@filterable() +def field(stream: Stream, t: Union[int, str] = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#field""" + return filter(stream, field.__name__, type=t) + + +@filterable() +def fieldhint(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fieldhint""" + return filter(stream, fieldhint.__name__, *args, **kwargs) + + +@filterable() +def fieldmatch(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#fieldmatch""" + return filter(stream, fieldmatch.__name__, *args, **kwargs) + + +@filterable() +def fieldorder(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fieldorder""" + return filter(stream, fieldorder.__name__, *args, **kwargs) + + +@filterable() +def afifo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afifo""" + return filter(stream, afifo.__name__, *args, **kwargs) + + +@filterable() +def fillborders(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fillborders""" + return filter(stream, fillborders.__name__, *args, **kwargs) + + +@filterable() +def find_rect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#find_rect""" + return filter(stream, find_rect.__name__, *args, **kwargs) + + +@filterable() +def floodfill(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#floodfill""" + return filter(stream, floodfill.__name__, *args, **kwargs) + + +@filterable() +def format(stream: Stream, *pix_fmts: str) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#format""" + return filter(stream, format.__name__, pix_fmts="|".join(pix_fmts) or None) + + +@filterable() +def fps(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fps""" + return filter(stream, fps.__name__, *args, **kwargs) + + +@filterable() +def framepack(*streams: Stream, format: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framepack""" + return filter(streams, framepack.__name__, max_inputs=2, format=format) + + +@filterable() +def framerate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framerate""" + return filter(stream, framerate.__name__, *args, **kwargs) + + +@filterable() +def framestep(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framestep""" + return filter(stream, framestep.__name__, *args, **kwargs) + + +@filterable() +def freezedetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#freezedetect""" + return filter(stream, freezedetect.__name__, *args, **kwargs) + + +@filterable() +def freezeframes(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#freezeframes""" + return filter(stream, freezeframes.__name__, *args, **kwargs) + + +@filterable() +def frei0r(stream: Stream, filter_name: str, filter_params: str) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#frei0r""" + return filter(stream, frei0r.__name__, filter_name, filter_params) + + +@filterable() +def fspp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fspp""" + return filter(stream, fspp.__name__, *args, **kwargs) + + +@filterable() +def gblur(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gblur""" + return filter(stream, gblur.__name__, *args, **kwargs) + + +@filterable() +def geq(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#geq""" + try: + kwargs["sigmaV"] = kwargs.pop("sigma_v") + except KeyError: + pass + + return filter(stream, geq.__name__, *args, **kwargs) + + +@filterable() +def gradfun(stream: Stream, *args, 
**kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gradfun""" + return filter(stream, gradfun.__name__, *args, **kwargs) + + +@filterable() +def graphmonitor(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#graphmonitor""" + return filter(stream, graphmonitor.__name__, *args, **kwargs) + + +@filterable() +def greyedge(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#greyedge""" + return filter(stream, greyedge.__name__, *args, **kwargs) + + +@filterable() +def haldclut(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#haldclut""" + return filter(stream, haldclut.__name__, *args, **kwargs) + + +@filterable() +def hflip(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hflip""" + return filter(stream, hflip.__name__, *args, **kwargs) + + +@filterable() +def histeq(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#histeq""" + return filter(stream, histeq.__name__, *args, **kwargs) + + +@filterable() +def histogram(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#histogram""" + return filter(stream, histogram.__name__, *args, **kwargs) + + +@filterable() +def hqdn3d(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hqdn3d""" + return filter(stream, hqdn3d.__name__, *args, **kwargs) + + +@filterable() +def hwdownload(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwdownload""" + return filter(stream, hwdownload.__name__, *args, **kwargs) + + +@filterable() +def hwmap(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwmap""" + return filter(stream, hwmap.__name__, *args, **kwargs) + + +@filterable() +def hwupload(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwupload""" + return filter(stream, hwupload.__name__, *args, **kwargs) + + +@filterable() +def hwupload_cuda(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwupload_cuda""" + return filter(stream, hwupload_cuda.__name__, *args, **kwargs) + + +@filterable() +def hqx(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hqx""" + return filter(stream, hqx.__name__, *args, **kwargs) + + +@filterable() +def hstack(*streams: Stream, inputs: int = None, + shortest: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hstack""" + return filter(streams, hstack.__name__, inputs=inputs or len(streams), shortest=shortest, min_inputs=2) + + +@filterable() +def hue(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hue""" + return filter(stream, hue.__name__, *args, **kwargs) + + +@filterable() +def hysteresis(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hysteresis""" + return filter(stream, hysteresis.__name__, *args, **kwargs) + + +@filterable() +def identity(*stream: Stream) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#identity""" + return filter(stream, identity.__name__, min_inputs=2) + + +@filterable() +def idet(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#idet""" + return filter(stream, idet.__name__, *args, **kwargs) + + +@filterable() +def il(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#il""" + return filter(stream, il.__name__, *args, **kwargs) + + +@filterable() +def inflate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#inflate""" + return filter(stream, inflate.__name__, *args, **kwargs) + + +@filterable() +def interlace(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#interlace""" + return filter(stream, interlace.__name__, *args, **kwargs) + + +@filterable() +def kerndeint(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#kerndeint""" + return filter(stream, kerndeint.__name__, *args, **kwargs) + + +@filterable() +def kirsch(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#kirsch""" + return filter(stream, kirsch.__name__, *args, **kwargs) + + +@filterable() +def lagfun(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lagfun""" + return filter(stream, lagfun.__name__, *args, **kwargs) + + +@filterable() +def lenscorrection(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lenscorrection""" + return filter(stream, lenscorrection.__name__, *args, **kwargs) + + +@filterable() +def lensfun(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lensfun""" + return filter(stream, lensfun.__name__, *args, **kwargs) + + +@filterable() +def libvmaf(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#libvmaf""" + return filter(stream, libvmaf.__name__, *args, **kwargs) + + +@filterable() +def limiter(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#limiter""" + return filter(stream, limiter.__name__, *args, **kwargs) + + +@filterable() +def loop(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#loop""" + return filter(stream, loop.__name__, *args, **kwargs) + + +@filterable() +def lut1d(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lut1d""" + return filter(stream, lut1d.__name__, *args, **kwargs) + + +@filterable() +def lut3d(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lut3d""" + return filter(stream, lut3d.__name__, *args, **kwargs) + + +@filterable() +def lumakey(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lumakey""" + return filter(stream, lumakey.__name__, *args, **kwargs) + + +@filterable() +def lutyuv(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lutyuv""" + return filter(stream, lutyuv.__name__, *args, **kwargs) + + +@filterable() +def tlut2(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tlut2""" + return filter(stream, tlut2.__name__, *args, **kwargs) + + +@filterable() +def maskedclamp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedclamp""" + return filter(stream, maskedclamp.__name__, *args, **kwargs) + + +@filterable() +def maskedmax(stream: Stream, 
*args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmax""" + return filter(stream, maskedmax.__name__, *args, **kwargs) + + +@filterable() +def maskedmerge(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmerge""" + return filter(stream, maskedmerge.__name__, *args, **kwargs) + + +@filterable() +def maskedmin(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmin""" + return filter(stream, maskedmin.__name__, *args, **kwargs) + + +@filterable() +def maskedthreshold(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedthreshold""" + return filter(stream, maskedthreshold.__name__, *args, **kwargs) + + +@filterable() +def maskfun(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskfun""" + return filter(stream, maskfun.__name__, *args, **kwargs) + + +@filterable() +def mcdeint(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mcdeint""" + return filter(stream, mcdeint.__name__, *args, **kwargs) + + +@filterable() +def median(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#median""" + return filter(stream, median.__name__, *args, **kwargs) + + +@filterable() +def mergeplanes(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mergeplanes""" + return filter(stream, mergeplanes.__name__, *args, **kwargs) + + +@filterable() +def mestimate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mestimate""" + return filter(stream, mestimate.__name__, *args, **kwargs) + + +@filterable() +def midequalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#midequalizer""" + return filter(stream, midequalizer.__name__, *args, **kwargs) + + +@filterable() +def minterpolate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#minterpolate""" + return filter(stream, minterpolate.__name__, *args, **kwargs) + + +@filterable() +def mix(*streams: Stream, inputs: int = None, weights: str = None, + scale: str = None, duration: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mix""" + return filter(streams, mix.__name__, min_inputs=2, inputs=inputs, weights=weights, scale=scale, duration=duration) + + +@filterable() +def monochrome(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#monochrome""" + return filter(stream, monochrome.__name__, *args, **kwargs) + + +@filterable() +def mpdecimate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mpdecimate""" + return filter(stream, mpdecimate.__name__, *args, **kwargs) + + +@filterable() +def negate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#negate""" + return filter(stream, negate.__name__, *args, **kwargs) + + +@filterable() +def nlmeans(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nlmeans""" + return filter(stream, nlmeans.__name__, *args, **kwargs) + + +@filterable() +def nnedi(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nnedi""" + return filter(stream, nnedi.__name__, 
*args, **kwargs) + + +@filterable() +def noformat(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#noformat""" + return filter(stream, noformat.__name__, *args, **kwargs) + + +@filterable() +def noise(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#noise""" + return filter(stream, noise.__name__, *args, **kwargs) + + +@filterable() +def normalize(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#normalize""" + return filter(stream, normalize.__name__, *args, **kwargs) + + +@filterable() +def null(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#null""" + return filter(stream, null.__name__, *args, **kwargs) + + +@filterable() +def ocr(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ocr""" + return filter(stream, ocr.__name__, *args, **kwargs) + + +@filterable() +def ocv(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ocv""" + return filter(stream, ocv.__name__, *args, **kwargs) + + +@filterable() +def oscilloscope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#oscilloscope""" + return filter(stream, oscilloscope.__name__, *args, **kwargs) + + +@filterable() +def overlay(main_node, overlay_node, x: Union[int, str] = 0, y: Union[int, str] = 0, + eof_action: str = None, eval: str = None, shortest: bool = None, + format: str = None, repeatlast: bool = None, alpha: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay""" + return filter([main_node, overlay_node], overlay.__name__, min_inputs=2, max_inputs=2, x=x, y=y, + eof_action=eof_action, eval=eval, shortest=shortest, format=format, repeatlast=repeatlast, + alpha=alpha) + + +@filterable() +def overlay_cuda(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay_cuda""" + return filter(stream, overlay_cuda.__name__, *args, **kwargs) + + +@filterable() +def owdenoise(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#owdenoise""" + return filter(stream, owdenoise.__name__, *args, **kwargs) + + +@filterable() +def pad(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pad""" + return filter(stream, pad.__name__, *args, **kwargs) + + +@filterable() +def palettegen(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#palettegen""" + return filter(stream, palettegen.__name__, *args, **kwargs) + + +@filterable() +def paletteuse(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#paletteuse""" + return filter(stream, paletteuse.__name__, *args, **kwargs) + + +@filterable() +def perspective(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#perspective""" + return filter(stream, perspective.__name__, *args, **kwargs) + + +@filterable() +def phase(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#phase""" + return filter(stream, phase.__name__, *args, **kwargs) + + +@filterable() +def photosensitivity(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#photosensitivity""" + return filter(stream, 
photosensitivity.__name__, *args, **kwargs) + + +@filterable() +def pixdesctest(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pixdesctest""" + return filter(stream, pixdesctest.__name__, *args, **kwargs) + + +@filterable() +def pixscope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pixscope""" + return filter(stream, pixscope.__name__, *args, **kwargs) + + +@filterable() +def pp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pp""" + return filter(stream, pp.__name__, *args, **kwargs) + + +@filterable() +def pp7(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pp7""" + return filter(stream, pp7.__name__, *args, **kwargs) + + +@filterable() +def premultiply(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#premultiply""" + return filter(stream, premultiply.__name__, *args, **kwargs) + + +@filterable() +def prewitt(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#prewitt""" + return filter(stream, prewitt.__name__, *args, **kwargs) + + +@filterable() +def pseudocolor(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pseudocolor""" + return filter(stream, pseudocolor.__name__, *args, **kwargs) + + +@filterable() +def psnr(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#psnr""" + return filter(stream, psnr.__name__, *args, **kwargs) + + +@filterable() +def pullup(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pullup""" + return filter(stream, pullup.__name__, *args, **kwargs) + + +@filterable() +def qp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#qp""" + return filter(stream, qp.__name__, *args, **kwargs) + + +@filterable() +def random(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#random""" + return filter(stream, random.__name__, *args, **kwargs) + + +@filterable() +def readeia608(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#readeia608""" + return filter(stream, readeia608.__name__, *args, **kwargs) + + +@filterable() +def readvitc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#readvitc""" + return filter(stream, readvitc.__name__, *args, **kwargs) + + +@filterable() +def remap(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#remap""" + return filter(stream, remap.__name__, *args, **kwargs) + + +@filterable() +def removegrain(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#removegrain""" + return filter(stream, removegrain.__name__, *args, **kwargs) + + +@filterable() +def removelogo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#removelogo""" + return filter(stream, removelogo.__name__, *args, **kwargs) + + +@filterable() +def repeatfields(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#repeatfields""" + return filter(stream, repeatfields.__name__, *args, **kwargs) + + +@filterable() +def reverse(stream: Stream, *args, **kwargs) -> FilterableStream: 
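+    # Caution: ffmpeg's reverse filter buffers the entire clip in memory, so
+    # the ffmpeg documentation suggests trimming first, e.g. (illustrative):
+    #     reverse(trim(stream, end_frame=100))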
+ """https://ffmpeg.org/ffmpeg-filters.html#reverse""" + return filter(stream, reverse.__name__, *args, **kwargs) + + +@filterable() +def rgbashift(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rgbashift""" + return filter(stream, rgbashift.__name__, *args, **kwargs) + + +@filterable() +def roberts(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#roberts""" + return filter(stream, roberts.__name__, *args, **kwargs) + + +@filterable() +def rotate(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rotate""" + return filter(stream, rotate.__name__, *args, **kwargs) + + +@filterable() +def sab(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sab""" + return filter(stream, sab.__name__, *args, **kwargs) + + +@filterable() +def scale(stream: Stream, *args, **kwargs) -> FilterableStream: + return filter(stream, scale.__name__, *args, **kwargs) + + +@filterable() +def scale_npp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scale_npp""" + return filter(stream, scale_npp.__name__, *args, **kwargs) + + +@filterable() +def scale2ref(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scale2ref""" + return filter(stream, scale2ref.__name__, *args, **kwargs) + + +@filterable() +def scroll(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scroll""" + return filter(stream, scroll.__name__, *args, **kwargs) + + +@filterable() +def scdet(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scdet""" + return filter(stream, scdet.__name__, *args, **kwargs) + + +@filterable() +def selectivecolor(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#selectivecolor""" + return filter(stream, selectivecolor.__name__, *args, **kwargs) + + +@filterable() +def separatefields(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#separatefields""" + return filter(stream, separatefields.__name__, *args, **kwargs) + + +@filterable() +def setsar(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setsar""" + return filter(stream, setsar.__name__, *args, **kwargs) + + +@filterable() +def setfield(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setfield""" + return filter(stream, setfield.__name__, *args, **kwargs) + + +@filterable() +def setparams(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setparams""" + return filter(stream, setparams.__name__, *args, **kwargs) + + +@filterable() +def shear(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shear""" + return filter(stream, shear.__name__, *args, **kwargs) + + +@filterable() +def showinfo(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showinfo""" + return filter(stream, showinfo.__name__, *args, **kwargs) + + +@filterable() +def showpalette(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showpalette""" + return filter(stream, showpalette.__name__, *args, **kwargs) + + +@filterable() +def shuffleframes(stream: 
Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shuffleframes""" + return filter(stream, shuffleframes.__name__, *args, **kwargs) + + +@filterable() +def shufflepixels(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shufflepixels""" + return filter(stream, shufflepixels.__name__, *args, **kwargs) + + +@filterable() +def shuffleplanes(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shuffleplanes""" + return filter(stream, shuffleplanes.__name__, *args, **kwargs) + + +@filterable() +def signalstats(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#signalstats""" + return filter(stream, signalstats.__name__, *args, **kwargs) + + +@filterable() +def signature(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#signature""" + return filter(stream, signature.__name__, *args, **kwargs) + + +@filterable() +def smartblur(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#smartblur""" + return filter(stream, smartblur.__name__, *args, **kwargs) + + +@filterable() +def sobel(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sobel""" + return filter(stream, sobel.__name__, *args, **kwargs) + + +@filterable() +def spp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#spp""" + return filter(stream, spp.__name__, *args, **kwargs) + + +@filterable() +def sr(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sr""" + return filter(stream, sr.__name__, *args, **kwargs) + + +@filterable() +def ssim(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ssim""" + return filter(stream, ssim.__name__, *args, **kwargs) + + +@filterable() +def stereo3d(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereo3d""" + return filter(stream, stereo3d.__name__, *args, **kwargs) + + +@filterable() +def astreamselect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#astreamselect""" + return filter(stream, astreamselect.__name__, *args, **kwargs) + + +@filterable() +def subtitles(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#subtitles""" + return filter(stream, subtitles.__name__, *args, **kwargs) + + +@filterable() +def super2xsai(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#super2xsai""" + return filter(stream, super2xsai.__name__, *args, **kwargs) + + +@filterable() +def swaprect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#swaprect""" + return filter(stream, swaprect.__name__, *args, **kwargs) + + +@filterable() +def swapuv(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#swapuv""" + return filter(stream, swapuv.__name__, *args, **kwargs) + + +@filterable() +def tblend(stream: Stream, all_mode: str = None, all_opacity: float = None, + all_expr: str = None, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tblend""" + return filter(stream, tblend.__name__, all_mode=all_mode, all_opacity=all_opacity, all_expr=all_expr, **kwargs) + + 
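+# A minimal end-to-end sketch of how these generated wrappers compose
+# (illustrative; assumes the package exposes an `input()` entry point that
+# yields a FilterableStream, which is not shown in this file):
+#
+#     clip = input("in.mp4")
+#     out = tblend(hflip(clip), all_mode="average").output("out.mp4")
+#     out.run()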
+@filterable() +def telecine(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#telecine""" + return filter(stream, telecine.__name__, *args, **kwargs) + + +@filterable() +def thistogram(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#thistogram""" + return filter(stream, thistogram.__name__, *args, **kwargs) + + +@filterable() +def threshold(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#threshold""" + return filter(stream, threshold.__name__, *args, **kwargs) + + +@filterable() +def thumbnail(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#thumbnail""" + return filter(stream, thumbnail.__name__, *args, **kwargs) + + +@filterable() +def tile(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tile""" + return filter(stream, tile.__name__, *args, **kwargs) + + +@filterable() +def tinterlace(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tinterlace""" + return filter(stream, tinterlace.__name__, *args, **kwargs) + + +@filterable() +def tmedian(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmedian""" + return filter(stream, tmedian.__name__, *args, **kwargs) + + +@filterable() +def tmidequalizer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmidequalizer""" + return filter(stream, tmidequalizer.__name__, *args, **kwargs) + + +@filterable() +def tmix(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmix""" + return filter(stream, tmix.__name__, *args, **kwargs) + + +@filterable() +def tonemap(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap""" + return filter(stream, tonemap.__name__, *args, **kwargs) + + +@filterable() +def tpad(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tpad""" + return filter(stream, tpad.__name__, *args, **kwargs) + + +@filterable() +def transpose(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#transpose""" + return filter(stream, transpose.__name__, *args, **kwargs) + + +@filterable() +def transpose_npp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#transpose_npp""" + return filter(stream, transpose_npp.__name__, *args, **kwargs) + + +@filterable() +def trim(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#trim""" + return filter(stream, trim.__name__, *args, **kwargs) + + +@filterable() +def unpremultiply(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unpremultiply""" + return filter(stream, unpremultiply.__name__, *args, **kwargs) + + +@filterable() +def unsharp(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unsharp""" + return filter(stream, unsharp.__name__, *args, **kwargs) + + +@filterable() +def untile(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#untile""" + return filter(stream, untile.__name__, *args, **kwargs) + + +@filterable() +def uspp(stream: Stream, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#uspp""" + return filter(stream, uspp.__name__, *args, **kwargs) + + +@filterable() +def v360(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#v360""" + return filter(stream, v360.__name__, *args, **kwargs) + + +@filterable() +def vaguedenoiser(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vaguedenoiser""" + return filter(stream, vaguedenoiser.__name__, *args, **kwargs) + + +@filterable() +def vectorscope(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vectorscope""" + return filter(stream, vectorscope.__name__, *args, **kwargs) + + +@filterable() +def vidstabdetect(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vidstabdetect""" + return filter(stream, vidstabdetect.__name__, *args, **kwargs) + + +@filterable() +def vidstabtransform(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vidstabtransform""" + return filter(stream, vidstabtransform.__name__, *args, **kwargs) + + +@filterable() +def vflip(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vflip""" + return filter(stream, vflip.__name__, *args, **kwargs) + + +@filterable() +def vfrdet(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vfrdet""" + return filter(stream, vfrdet.__name__, *args, **kwargs) + + +@filterable() +def vibrance(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vibrance""" + return filter(stream, vibrance.__name__, *args, **kwargs) + + +@filterable() +def vif(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vif""" + return filter(stream, vif.__name__, *args, **kwargs) + + +@filterable() +def vignette(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vignette""" + return filter(stream, vignette.__name__, *args, **kwargs) + + +@filterable() +def vmafmotion(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vmafmotion""" + return filter(stream, vmafmotion.__name__, *args, **kwargs) + + +@filterable() +def vstack(*streams: Stream, inputs: int = None, + shortest: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vstack""" + return filter(streams, vstack.__name__, inputs=inputs or len(streams), shortest=shortest, min_inputs=2) + + +@filterable() +def w3fdif(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#w3fdif""" + return filter(stream, w3fdif.__name__, *args, **kwargs) + + +@filterable() +def waveform(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#waveform""" + return filter(stream, waveform.__name__, *args, **kwargs) + + +@filterable() +def doubleweave(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#doubleweave""" + return filter(stream, doubleweave.__name__, *args, **kwargs) + + +@filterable() +def xbr(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xbr""" + return filter(stream, xbr.__name__, *args, **kwargs) + + +@filterable() +def xfade(*stream: Stream, transition: str = None, duration: float = None, + offset: float = 
None, expr: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xfade""" + return filter(stream, xfade.__name__, max_inputs=2, transition=transition, duration=duration, offset=offset, + expr=expr) + + +@filterable() +def xmedian(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xmedian""" + return filter(stream, xmedian.__name__, *args, **kwargs) + + +@filterable() +def xstack(*streams: Stream, inputs: int = None, layout: str = None, + shortest: int = None, fill: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xstack""" + return filter(streams, xstack.__name__, inputs=inputs or len(streams), layout=layout, shortest=shortest, fill=fill, + min_inputs=2) + + +@filterable() +def yadif(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yadif""" + return filter(stream, yadif.__name__, *args, **kwargs) + + +@filterable() +def yadif_cuda(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yadif_cuda""" + return filter(stream, yadif_cuda.__name__, *args, **kwargs) + + +@filterable() +def yaepblur(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yaepblur""" + return filter(stream, yaepblur.__name__, *args, **kwargs) + + +@filterable() +def zoompan(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#zoompan""" + return filter(stream, zoompan.__name__, *args, **kwargs) + + +@filterable() +def zscale(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#zscale""" + return filter(stream, zscale.__name__, *args, **kwargs) + + +"""OpenCL Video Filters + +https://ffmpeg.org/ffmpeg-filters.html#OpenCL-Video-Filters +""" + + +@filterable() +def avgblur_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avgblur_opencl""" + return filter(stream, avgblur_opencl.__name__, *args, **kwargs) + + +@filterable() +def boxblur_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#boxblur_opencl""" + return filter(stream, boxblur_opencl.__name__, *args, **kwargs) + + +@filterable() +def colorkey_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorkey_opencl""" + return filter(stream, colorkey_opencl.__name__, *args, **kwargs) + + +@filterable() +def convolution_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolution_opencl""" + return filter(stream, convolution_opencl.__name__, *args, **kwargs) + + +@filterable() +def erosion_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#erosion_opencl""" + return filter(stream, erosion_opencl.__name__, *args, **kwargs) + + +@filterable() +def deshake_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#deshake_opencl""" + return filter(stream, deshake_opencl.__name__, *args, **kwargs) + + +@filterable() +def dilation_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dilation_opencl""" + return filter(stream, dilation_opencl.__name__, *args, **kwargs) + + +@filterable() +def nlmeans_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + 
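+    # Note: the *_opencl filters in this section require an initialized OpenCL
+    # device on the ffmpeg command line, e.g. (illustrative):
+    #     ffmpeg -init_hw_device opencl=gpu -filter_hw_device gpu ...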
"""https://ffmpeg.org/ffmpeg-filters.html#nlmeans_opencl""" + return filter(stream, nlmeans_opencl.__name__, *args, **kwargs) + + +@filterable() +def overlay_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay_opencl""" + return filter(stream, overlay_opencl.__name__, *args, **kwargs) + + +@filterable() +def pad_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pad_opencl""" + return filter(stream, pad_opencl.__name__, *args, **kwargs) + + +@filterable() +def prewitt_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#prewitt_opencl""" + return filter(stream, prewitt_opencl.__name__, *args, **kwargs) + + +@filterable() +def program_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#program_opencl""" + return filter(stream, program_opencl.__name__, *args, **kwargs) + + +@filterable() +def roberts_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#roberts_opencl""" + return filter(stream, roberts_opencl.__name__, *args, **kwargs) + + +@filterable() +def sobel_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sobel_opencl""" + return filter(stream, sobel_opencl.__name__, *args, **kwargs) + + +@filterable() +def tonemap_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap_opencl""" + return filter(stream, tonemap_opencl.__name__, *args, **kwargs) + + +@filterable() +def unsharp_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unsharp_opencl""" + return filter(stream, unsharp_opencl.__name__, *args, **kwargs) + + +@filterable() +def xfade_opencl(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xfade_005fopencl""" + return filter(stream, xfade_opencl.__name__, *args, **kwargs) + + +"""VAAPI Video Filters + +https://ffmpeg.org/ffmpeg-filters.html#VAAPI-Video-Filters +""" + + +@filterable() +def tonemap_vaapi(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap_vaapi""" + return filter(stream, tonemap_vaapi.__name__, *args, **kwargs) + + +"""Video Sources + +https://ffmpeg.org/ffmpeg-filters.html#Video-Sources +""" + + +@filterable() +def buffer(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#buffer""" + return filter(stream, buffer.__name__, *args, **kwargs) + + +@filterable() +def cellauto(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cellauto""" + return filter(stream, cellauto.__name__, *args, **kwargs) + + +@filterable() +def coreimagesrc(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#coreimagesrc""" + return filter(stream, coreimagesrc.__name__, *args, **kwargs) + + +@filterable() +def gradients(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gradients""" + return filter(stream, gradients.__name__, *args, **kwargs) + + +@filterable() +def mandelbrot(stream: Stream, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mandelbrot""" + return filter(stream, mandelbrot.__name__, *args, **kwargs) + + +@filterable() +def 
mptestsrc(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#mptestsrc"""
+    return filter(stream, mptestsrc.__name__, *args, **kwargs)
+
+
+@filterable()
+def frei0r_src(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#frei0r_src"""
+    return filter(stream, frei0r_src.__name__, *args, **kwargs)
+
+
+@filterable()
+def life(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#life"""
+    return filter(stream, life.__name__, *args, **kwargs)
+
+
+@filterable()
+def yuvtestsrc(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#yuvtestsrc"""
+    return filter(stream, yuvtestsrc.__name__, *args, **kwargs)
+
+
+@filterable()
+def openclsrc(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#openclsrc"""
+    return filter(stream, openclsrc.__name__, *args, **kwargs)
+
+
+@filterable()
+def sierpinski(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#sierpinski"""
+    return filter(stream, sierpinski.__name__, *args, **kwargs)
+
+
+"""Video Sinks
+
+https://ffmpeg.org/ffmpeg-filters.html#Video-Sinks
+"""
+
+
+@filterable()
+def buffersink(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#buffersink"""
+    return filter(stream, buffersink.__name__, *args, **kwargs)
+
+
+@filterable()
+def nullsink(stream: Stream, *args, **kwargs) -> FilterableStream:
+    """https://ffmpeg.org/ffmpeg-filters.html#nullsink"""
+    return filter(stream, nullsink.__name__, *args, **kwargs)
diff --git a/ffmpeg/nodes.py b/ffmpeg/nodes.py
new file mode 100644
index 0000000..91f977b
--- /dev/null
+++ b/ffmpeg/nodes.py
@@ -0,0 +1,2231 @@
+'''
+Date: 2021.02.27 09:29:10
+Description: Omit
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.29 10:59:38
+'''
+from __future__ import annotations
+
+import copy
+import os
+import subprocess
+from collections import defaultdict
+from pathlib import Path
+from time import perf_counter
+from typing import Dict, List, Tuple, Union
+
+from pkgs import color
+
+from ._dag import DagEdge, DagNode, topological_sort
+from ._node import (Node, NodeTypes, Stream, format_input_stream_tag,
+                    get_filters_spec, get_stream_spec_nodes, streamable)
+from ._utils import (join_cmd_args_seq, convert_kwargs_to_cmd_line_args,
+                     escape)
+
+__all__ = [
+    'FFmpegError',
+    'FilterableStream',
+    'FilterNode',
+    'GlobalNode',
+    'InputNode',
+    'MergeOutputsNode',
+    'OutputNode',
+    'OutputStream',
+    'Stream',
+    'filterable',
+]
+
+
+def filterable():
+    return streamable(FilterableStream)
+
+
+class FFmpegError(Exception):
+
+    def __init__(self, executable, stdout, stderr):
+        msg = [executable]
+
+        if stdout:
+            msg.append(stdout.decode('utf-8'))
+
+        if stderr:
+            msg.append(stderr.decode('utf-8'))
+
+        super(FFmpegError, self).__init__(' '.join(msg))
+
+
+class OutputStream(Stream):
+    def __init__(self, upstream_node: Node, upstream_label: str, selector=None):
+        super().__init__(upstream_node=upstream_node, upstream_label=upstream_label,
+                         node_types=(OutputNode, GlobalNode, MergeOutputsNode), selector=selector)
+
+    def with_global_args(self, *args: str) -> OutputStream:
+        """Add extra global command-line argument(s), e.g. ``-progress``."""
+        return GlobalNode(self, args=args).stream()
+
+    def merge_outputs(self, *streams: Stream) -> OutputStream:
+        """Merge several output streams into a single ffmpeg invocation."""
+        return MergeOutputsNode([self, *streams]).stream()
+
+    def get_output_args(self, overwrite=True, progress='') -> List[str]:
+        nodes = get_stream_spec_nodes(self)
+        sorted_nodes, outgoing_edge_graphs = topological_sort(nodes)
+
+        type_nodes = defaultdict(list)
+        for node in sorted_nodes:
+            type_nodes[node.Type].append(node)
+
+        stream_tag_graph = {(node, None): str(index)
+                            for index, node in enumerate(type_nodes[NodeTypes.Input])}
+
+        args = []
+        for node in type_nodes[NodeTypes.Input]:
+            args.extend(node.get_input_args())
+
+        filters_spec = get_filters_spec(type_nodes[NodeTypes.Filter],
+                                        stream_tag_graph, outgoing_edge_graphs)
+        if filters_spec:
+            args.extend(['-filter_complex', filters_spec])
+
+        for node in type_nodes[NodeTypes.Output]:
+            args.extend(node.get_output_args(stream_tag_graph))
+
+        for node in type_nodes[NodeTypes.Global]:
+            args.extend(node.get_global_args())
+
+        if progress:
+            args.extend(['-progress', 'unix://' + progress.replace('unix://', '', 1)])
+
+        if overwrite:
+            args.append('-y')
+
+        args.append('-hide_banner')
+
+        return args
+
+    def compile(self, executable="ffmpeg", direct_print=True, join_args=False,
+                overwrite=True, progress='') -> Union[str, List[str]]:
+        '''Build command-line for invoking ffmpeg.'''
+        cmd_args_seq = [executable] + self.get_output_args(overwrite, progress)
+        command = join_cmd_args_seq(cmd_args_seq)
+
+        if direct_print:
+            color.greenln(command)
+
+        if join_args:
+            return command
+
+        return cmd_args_seq
+
+    def run_async(self, executable="ffmpeg", direct_print=True, join_args=False,
+                  pipe_stdin=False, pipe_stdout=True, pipe_stderr=True, quiet=False,
+                  overwrite=True, progress='') -> subprocess.Popen:
+        '''Asynchronously invoke ffmpeg for the supplied node graph.'''
+        command_sequence = self.compile(executable, direct_print, join_args, overwrite, progress)
+
+        stdin_stream = subprocess.PIPE if pipe_stdin else None
+        stdout_stream = subprocess.PIPE if pipe_stdout else None
+        stderr_stream = subprocess.PIPE if pipe_stderr else None
+
+        return subprocess.Popen(
+            command_sequence,
+            stdin=stdin_stream,
+            stdout=stdout_stream if not quiet else subprocess.DEVNULL,
+            stderr=stderr_stream if not quiet else subprocess.STDOUT,
+        )
+
+    def run(self, executable="ffmpeg", direct_print=True, quiet=False,
+            capture_stdout=True, capture_stderr=True, pipe_stdin=None,
+            overwrite=True, progress='') -> Tuple[bytes, bytes]:
+        '''Invoke ffmpeg for the supplied node graph.'''
+        start = perf_counter()
+        process = self.run_async(
+            executable,
+            direct_print,
+            quiet=quiet,
+            pipe_stdin=pipe_stdin is not None,
+            pipe_stdout=capture_stdout,
+            pipe_stderr=capture_stderr,
+            overwrite=overwrite,
+            progress=progress,
+        )
+
+        stdout, stderr = process.communicate(pipe_stdin)
+        if process.poll():
+            raise FFmpegError('ffmpeg', stdout, stderr)
+
+        end = perf_counter()
+        if not progress:
+            color.redln("[%2.4fs]\n" % (end - start))
+
+        return stdout, stderr
+
+
+class FilterableStream(Stream):
+
+    def __init__(self, upstream_node: Node, upstream_label: str, selector=None):
+        super().__init__(
+            upstream_node=upstream_node,
+            upstream_label=upstream_label,
+            node_types=(InputNode, FilterNode),
+            selector=selector,
+        )
+
+    def output(self, *streams_or_source, vn=False, an=False, ar=None, ab=None, ac=None,
+               acodec=None, vcodec=None, codec: str = None, aq_scale=None, vq_scale=None,
+               aspect=None, fps=None, format=None, pixel_format=None, video_bitrate=None,
+               audio_bitrate=None, v_profile=None, preset=None, mov_flags=None,
+               shortest=False, frame_size=None,
+               v_frames: int = None, start_position: float = None,
+               duration: float = None, video_filter: str = None, audio_filter: str = None, ignore_output=False,
+               preview: bool = False, enable_cuda=True, args: list = None, **kwargs) -> OutputStream:
+        raise NotImplementedError
+
+    def filter(self, *args, **kwargs) -> FilterableStream:
+        raise NotImplementedError
+
+    # Custom Filters
+    def gltransition(self, source: Union[str, Path] = None, offset: float = 1,
+                     duration: float = 1) -> FilterableStream:
+        raise NotImplementedError
+
+    # Audio Filters
+    def acompressor(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acompressor"""
+        raise NotImplementedError
+
+    def acontrast(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acontrast"""
+        raise NotImplementedError
+
+    def acopy(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acopy"""
+        raise NotImplementedError
+
+    def acrossfade(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acrossfade"""
+        raise NotImplementedError
+
+    def acrossover(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acrossover"""
+        raise NotImplementedError
+
+    def acrusher(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acrusher"""
+        raise NotImplementedError
+
+    def acue(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#acue"""
+        raise NotImplementedError
+
+    def adeclick(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#adeclick"""
+        raise NotImplementedError
+
+    def adeclip(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#adeclip"""
+        raise NotImplementedError
+
+    def adelay(self, delays: str = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#adelay"""
+        raise NotImplementedError
+
+    def adenorm(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#adenorm"""
+        raise NotImplementedError
+
+    def aintegral(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aintegral"""
+        raise NotImplementedError
+
+    def aecho(self, in_gain: float = 0.6, out_gain: float = 0.3, delays: str = "1000|1800",
+              decays: str = "0.3|0.25") -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aecho"""
+        raise NotImplementedError
+
+    def aemphasis(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aemphasis"""
+        raise NotImplementedError
+
+    def aeval(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aeval"""
+        raise NotImplementedError
+
+    def aexciter(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aexciter"""
+        raise NotImplementedError
+
+    def afade(self, fadein: bool = False, fadeout: bool = False,
+              start_sample: int = None, nb_samples: int = None,
+              start_time: float = None, duration: float = None,
+              curve: str = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#afade"""
+        raise NotImplementedError
+
+    def afftdn(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#afftdn"""
+        raise NotImplementedError
+
+    def afftfilt(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#afftfilt"""
+        raise NotImplementedError
+
+    def afir(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#afir"""
+        raise NotImplementedError
+
+    def aformat(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aformat"""
+        raise NotImplementedError
+
+    def afreqshift(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#afreqshift"""
+        raise NotImplementedError
+
+    def agate(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#agate"""
+        raise NotImplementedError
+
+    def aiir(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aiir"""
+        raise NotImplementedError
+
+    def alimiter(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#alimiter"""
+        raise NotImplementedError
+
+    def allpass(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#allpass"""
+        raise NotImplementedError
+
+    def aloop(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aloop"""
+        raise NotImplementedError
+
+    def amerge(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#amerge"""
+        raise NotImplementedError
+
+    def amix(self, inputs: int = None, duration: str = None,
+             dropout_transition: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#amix"""
+        raise NotImplementedError
+
+    def amultiply(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#amultiply"""
+        raise NotImplementedError
+
+    def anequalizer(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#anequalizer"""
+        raise NotImplementedError
+
+    def anlmdn(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#anlmdn"""
+        raise NotImplementedError
+
+    def anlms(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#anlms"""
+        raise NotImplementedError
+
+    def anull(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#anull"""
+        raise NotImplementedError
+
+    def apad(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#apad"""
+        raise NotImplementedError
+
+    def aphaser(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aphaser"""
+        raise NotImplementedError
+
+    def aphaseshift(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aphaseshift"""
+        raise NotImplementedError
+
+    def apulsator(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#apulsator"""
+        raise NotImplementedError
+
+    def aresample(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#aresample"""
+        raise NotImplementedError
+
+    def areverse(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#areverse"""
+        raise NotImplementedError
+
+    def arnndn(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#arnndn"""
+        raise NotImplementedError
+
+    def asetnsamples(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#asetnsamples"""
+        raise NotImplementedError
+
+    def asetrate(self, *args, **kwargs) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#asetrate"""
+        raise NotImplementedError
+
+    def ashowinfo(self, *args,
**kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ashowinfo""" + raise NotImplementedError + + def asoftclip(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asoftclip""" + raise NotImplementedError + + def asr(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asr""" + raise NotImplementedError + + def astats(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#astats""" + raise NotImplementedError + + def asubboost(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asubboost""" + raise NotImplementedError + + def asubcut(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asubcut""" + raise NotImplementedError + + def asupercut(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asupercut""" + raise NotImplementedError + + def asuperpass(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asuperpass""" + raise NotImplementedError + + def asuperstop(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asuperstop""" + raise NotImplementedError + + def atempo(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atempo""" + raise NotImplementedError + + def atrim(self, start: float = None, end: float = None, start_pts: int = None, + end_pts: int = None, duration: float = None, start_frame: int = None, + end_frame: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atrim""" + raise NotImplementedError + + def axcorrelate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#axcorrelate""" + raise NotImplementedError + + def bandpass(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bandpass""" + raise NotImplementedError + + def bandreject(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bandreject""" + raise NotImplementedError + + def lowshelf(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lowshelf""" + raise NotImplementedError + + def biquad(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#biquad""" + raise NotImplementedError + + def bs2b(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bs2b""" + raise NotImplementedError + + def channelmap(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#channelmap""" + raise NotImplementedError + + def channelsplit(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#channelsplit""" + raise NotImplementedError + + def chorus(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chorus""" + raise NotImplementedError + + def compand(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#compand""" + raise NotImplementedError + + def compensationdelay(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#compensationdelay""" + raise NotImplementedError + + def crossfeed(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#crossfeed""" + raise NotImplementedError + + def crystalizer(self, *args, **kwargs) -> FilterableStream: + 
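+        # Example for the atrim stub above: keep seconds 10-20 of the audio,
+        # then reset timestamps (asetpts usage is illustrative here):
+        #     stream.atrim(start=10, end=20).asetpts("PTS-STARTPTS")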
"""https://ffmpeg.org/ffmpeg-filters.html#crystalizer""" + raise NotImplementedError + + def dcshift(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dcshift""" + raise NotImplementedError + + def deesser(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#deesser""" + raise NotImplementedError + + def drmeter(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drmeter""" + raise NotImplementedError + + def dynaudnorm(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dynaudnorm""" + raise NotImplementedError + + def earwax(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#earwax""" + raise NotImplementedError + + def equalizer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#equalizer""" + raise NotImplementedError + + def extrastereo(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#extrastereo""" + raise NotImplementedError + + def firequalizer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#firequalizer""" + raise NotImplementedError + + def flanger(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#flanger""" + raise NotImplementedError + + def haas(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#haas""" + raise NotImplementedError + + def hdcd(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hdcd""" + raise NotImplementedError + + def headphone(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#headphone""" + raise NotImplementedError + + def highpass(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#highpass""" + raise NotImplementedError + + def join(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#join""" + raise NotImplementedError + + def ladspa(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ladspa""" + raise NotImplementedError + + def loudnorm(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#loudnorm""" + raise NotImplementedError + + def lowpass(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lowpass""" + raise NotImplementedError + + def lv2(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lv2""" + raise NotImplementedError + + def mcompand(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mcompand""" + raise NotImplementedError + + def pan(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pan""" + raise NotImplementedError + + def replaygain(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#replaygain""" + raise NotImplementedError + + def resample(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#resample""" + raise NotImplementedError + + def rubberband(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rubberband""" + raise NotImplementedError + + def sidechaincompress(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sidechaincompress""" + raise NotImplementedError + + 
def sidechaingate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sidechaingate""" + raise NotImplementedError + + def silencedetect(self, noise: float, duration: float) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#silencedetect""" + raise NotImplementedError + + def silenceremove(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#silenceremove""" + raise NotImplementedError + + def sofalizer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sofalizer""" + raise NotImplementedError + + def speechnorm(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#speechnorm""" + raise NotImplementedError + + def stereotools(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereotools""" + raise NotImplementedError + + def stereowiden(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereowiden""" + raise NotImplementedError + + def superequalizer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#superequalizer""" + raise NotImplementedError + + def surround(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#surround""" + raise NotImplementedError + + def highshelf(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#highshelf""" + raise NotImplementedError + + def tremolo(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tremolo""" + raise NotImplementedError + + def vibrato(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vibrato""" + raise NotImplementedError + + def volume(self, volume: Union[str, float], *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#volume""" + raise NotImplementedError + + def volumedetect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#volumedetect""" + raise NotImplementedError + + def abuffer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abuffer""" + raise NotImplementedError + + def aevalsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aevalsrc""" + raise NotImplementedError + + def afirsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#afirsrc""" + raise NotImplementedError + + def anullsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#anullsrc""" + raise NotImplementedError + + def flite(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#flite""" + raise NotImplementedError + + def anoisesrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#anoisesrc""" + raise NotImplementedError + + def hilbert(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hilbert""" + raise NotImplementedError + + def sinc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sinc""" + raise NotImplementedError + + def sine(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sine""" + raise NotImplementedError + + def abuffersink(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abuffersink""" + raise 
NotImplementedError + + def anullsink(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#anullsink""" + raise NotImplementedError + + # Video Filters + def addroi(self, x: int = None, y: int = None, w: int = None, h: int = None, + qoffset: float = None, clear: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#addroi""" + raise NotImplementedError + + def alphaextract(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#alphaextract""" + raise NotImplementedError + + def alphamerge(self, *streams: Stream) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#alphamerge""" + raise NotImplementedError + + def amplify(self, radius: int = None, factor: int = None, threshold: int = None, + tolerance: int = None, low: int = None, high: int = None, + planes: int = None, ) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amplify""" + raise NotImplementedError + + def ass(self, filename: Union[str, Path] = None, original_size: str = None, + fontsdir: Union[str, Path] = None, alpha: int = None, charenc: str = None, + stream_index: int = None, force_style: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ass""" + raise NotImplementedError + + def atadenoise(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#atadenoise""" + raise NotImplementedError + + def avgblur(self, x: int = None, y: int = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avgblur""" + raise NotImplementedError + + def bbox(self, min_val: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bbox""" + raise NotImplementedError + + def bilateral(self, s: float = None, r: float = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bilateral""" + raise NotImplementedError + + def bitplanenoise(self, bitplane: int = None, + filter: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bitplanenoise""" + raise NotImplementedError + + def blackdetect(self, d: float = None, pic_th: float = None, + pix_th: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blackdetect""" + raise NotImplementedError + + def blackframe(self, amount: float = None, + threshold: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blackframe""" + raise NotImplementedError + + def blend(self, all_mode: str = None, all_opacity: float = None, + all_expr: str = None, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#blend""" + raise NotImplementedError + + def bm3d(self, sigma: float = None, block: int = None, bstep: int = None, + group: int = None, range: int = None, mstep: int = None, thmse: int = None, + hdthr: int = None, estim: str = None, ref: bool = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bm3d""" + raise NotImplementedError + + def boxblur(self, luma_radius: int = None, luma_power: int = None, + chroma_radius: int = None, chroma_power: int = None, + alpha_radius: int = None, alpha_power: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#boxblur""" + raise NotImplementedError + + def bwdif(self, mode: int = None, parity: int = None, + deint: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#bwdif""" + raise NotImplementedError + + def cas(self, strength: float = None, 
planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cas""" + raise NotImplementedError + + def chromahold(self, color: str = None, similarity: float = None, + blend: float = None, yuv: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromahold""" + raise NotImplementedError + + def chromakey(self, color: str = None, similarity: float = None, + blend: float = None, yuv: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromakey""" + raise NotImplementedError + + def chromanr(self, thres: int = None, sizew: int = None, sizeh: int = None, + stepw: int = None, steph: int = None, threy: int = None, + threu: int = None, threv: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromanr""" + raise NotImplementedError + + def chromashift(self, cbh: int = None, cbv: int = None, crh: int = None, + crv: int = None, edge: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#chromashift""" + raise NotImplementedError + + def ciescope(self, system: str = None, cie: str = None, gamuts: str = None, + size: int = None, intensity: float = None, contrast: float = None, + corrgamma: bool = None, showwhite: bool = None, + gamma: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ciescope""" + raise NotImplementedError + + def codecview(self, mv: str = None, qp: str = None, mv_type: str = None, + frame_type: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#codecview""" + raise NotImplementedError + + def colorbalance(self, rs: float = None, gs: float = None, + bs: float = None, rm: float = None, gm: float = None, + bm: float = None, rh: float = None, gh: float = None, + bh: float = None, pl: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorbalance""" + raise NotImplementedError + + def colorcontrast(self, rc: float = None, gm: float = None, by: float = None, + rcw: float = None, gmw: float = None, byw: float = None, + pl: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorcontrast""" + raise NotImplementedError + + def colorcorrect(self, rl: float = None, bl: float = None, rh: float = None, + bh: float = None, saturation: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorcorrect""" + raise NotImplementedError + + def colorchannelmixer(self, rr: float = None, rg: float = None, + rb: float = None, ra: float = None, gr: float = None, + gg: float = None, gb: float = None, ga: float = None, + br: float = None, bg: float = None, ba: float = None, + ar: float = None, ag: float = None, ab: float = None, + aa: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorchannelmixer""" + raise NotImplementedError + + def colorize(self, hue: int = None, saturation: float = None, + lightness: float = None, mix: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorize""" + raise NotImplementedError + + def colorkey(self, color: str = None, similarity: float = None, + blend: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorkey""" + raise NotImplementedError + + def colorhold(self, color: str = None, similarity: float = None, + blend: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorhold""" + raise NotImplementedError + + def colorlevels(self, rimin: float = None, gimin: float = None, + bimin: float = None, aimin: 
float = None, rimax: float = None, + gimax: float = None, bimax: float = None, aimax: float = None, + romin: float = None, gomin: float = None, bomin: float = None, + aomin: float = None, romax: float = None, gomax: float = None, + bomax: float = None, aomax: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorlevels""" + raise NotImplementedError + + def colormatrix(self, src: str, dst: str) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colormatrix""" + raise NotImplementedError + + def colorspace(self, all: str = None, space: str = None, trc: str = None, + primaries: str = None, range: str = None, format: str = None, + fast: bool = None, dither: str = None, wpadapt: str = None, + iall: str = None, ispace: str = None, iprimaries: str = None, + itrc: str = None, irange: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorspace""" + raise NotImplementedError + + def colortemperature(self, temperature: int = None, mix: float = None, + pl: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colortemperature""" + raise NotImplementedError + + def convolution(self, m0: int = None, m1: int = None, m2: int = None, m3: int = None, + rdiv0: int = None, rdiv1: int = None, rdiv2: int = None, rdiv3: int = None, + bias0: int = None, bias1: int = None, bias2: int = None, bias3: int = None, + mode0: int = None, mode1: int = None, mode2: int = None, + mode3: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolution""" + raise NotImplementedError + + def convolve(self, planes: int = None, impulse: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolve""" + raise NotImplementedError + + def copy(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#copy""" + raise NotImplementedError + + def coreimage(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#coreimage""" + raise NotImplementedError + + def cover_rect(self, cover: Union[str, Path] = None, + mode: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cover_rect""" + raise NotImplementedError + + def crop(self, w: int = None, h: int = None, x: int = None, y: int = None, + keep_aspect: bool = None, exact: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#crop""" + raise NotImplementedError + + def cropdetect(self, limit: int = None, round: int = None, + reset_count: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cropdetect""" + raise NotImplementedError + + def cue(self, cue: int = None, preroll: int = None, + buffer: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cue""" + raise NotImplementedError + + def curves(self, preset: str = None, master: str = None, red: str = None, + green: str = None, blue: str = None, all: str = None, + psfile: Union[str, Path] = None, plot: str = None) -> FilterableStream: + """Apply color adjustments using curves. 
+
+        https://ffmpeg.org/ffmpeg-filters.html#curves"""
+        raise NotImplementedError
+
+    def datascope(self, size: str = None, x: int = None, y: int = None,
+                  mode: str = None, axis: str = None, opacity: float = None,
+                  format: str = None, components: float = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#datascope"""
+        raise NotImplementedError
+
+    def dblur(self, angle: int = None, radius: int = None,
+              planes: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#dblur"""
+        raise NotImplementedError
+
+    def dctdnoiz(self, sigma: float = None, overlap: int = None, expr: str = None,
+                 n: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#dctdnoiz"""
+        raise NotImplementedError
+
+    def deband(self, thr1: float = None, thr2: float = None, thr3: float = None,
+               thr4: float = None, r: int = None, d: float = None, blur: bool = None,
+               c: bool = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deband"""
+        raise NotImplementedError
+
+    def deblock(self, filter: str = None, block: int = None, alpha: float = None,
+                beta: float = None, gamma: float = None, delta: float = None,
+                planes: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deblock"""
+        raise NotImplementedError
+
+    def decimate(self, cycle: int = None, dupthres: float = None,
+                 scthresh: float = None, blockx: int = None, blocky: int = None,
+                 ppsrc: int = None, chroma: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#decimate"""
+        raise NotImplementedError
+
+    def deconvolve(self, planes: int = None, impulse: str = None,
+                   noise: float = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deconvolve"""
+        raise NotImplementedError
+
+    def dedot(self, m: str = None, lt: float = None, tl: float = None,
+              tc: float = None, ct: float = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#dedot"""
+        raise NotImplementedError
+
+    def deflate(self, threshold0: int = None, threshold1: int = None,
+                threshold2: int = None, threshold3: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deflate"""
+        raise NotImplementedError
+
+    def deflicker(self, size: int = None, mode: str = None,
+                  bypass: bool = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deflicker"""
+        raise NotImplementedError
+
+    def dejudder(self, cycle: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#dejudder"""
+        raise NotImplementedError
+
+    def delogo(self, x: int, y: int, w: int, h: int, band: int = None,
+               show: bool = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#delogo"""
+        raise NotImplementedError
+
+    def derain(self, filter_type: str = None, dnn_backend: str = None,
+               model: Union[str, Path] = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#derain"""
+        raise NotImplementedError
+
+    def deshake(self, x: int = None, y: int = None, w: int = None,
+                h: int = None, rx: int = None, ry: int = None, edge: str = None,
+                blocksize: int = None, contrast: int = None, search: str = None,
+                filename: Union[str, Path] = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#deshake"""
+        raise NotImplementedError
+
+    def despill(self, type: str = None, mix: str = None, expand: int = None,
+                red: int = None, green: int = None, blue: int = None,
+                brightness: int = None, alpha: int = None) -> FilterableStream:
+        """https://ffmpeg.org/ffmpeg-filters.html#despill"""
+        raise
NotImplementedError + + def detelecine(self, first_field: str = None, pattern: int = None, + start_frame: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#detelecine""" + raise NotImplementedError + + def dilation(self, threshold0: int = None, threshold1: int = None, + threshold2: int = None, threshold3: int = None, + coordinates: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dilation""" + raise NotImplementedError + + def displace(self, edge: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#displace""" + raise NotImplementedError + + def dnn_processing(self, dnn_backend: str = None, + model: Union[str, Path] = None, input: str = None, + output: str = None, set_async: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dnn_processing""" + raise NotImplementedError + + def drawbox(self, x: int = None, y: int = None, w: int = None, + h: int = None, color: str = None, thickness: int = None, + replace: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawbox""" + raise NotImplementedError + + def drawgraph(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawgraph""" + raise NotImplementedError + + def drawgrid(self, x: int = None, y: int = None, w: int = None, + h: int = None, color: str = None, thickness: int = None, + replace: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawgrid""" + raise NotImplementedError + + def drawtext(self, text: str = None, x: int = 0, y: int = 0, + fontsize: int = 0, fontfile: Union[str, Path] = None, + fontcolor: str = None, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#drawtext""" + raise NotImplementedError + + def edgedetect(self, low: float = None, high: float = None, mode: str = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#edgedetect""" + raise NotImplementedError + + def elbg(self, codebook_length: int = None, nb_steps: int = None, + seed: int = None, pal8: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#elbg""" + raise NotImplementedError + + def entropy(self, mode: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#entropy""" + raise NotImplementedError + + def epx(self, n: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#epx""" + raise NotImplementedError + + def eq(self, contrast: float = None, brightness: float = None, + saturation: float = None, gamma: float = None, gamma_r: float = None, + gamma_g: float = None, gamma_b: float = None, + gamma_weight: float = None, eval: str = None, ) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#eq""" + raise NotImplementedError + + def erosion(self, threshold0: int = None, threshold1: int = None, + threshold2: int = None, threshold3: int = None, + coordinates: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#erosion""" + raise NotImplementedError + + def estdif(self, mode: str = None, parity: str = None, deint: str = None, + rslope: int = None, redge: int = None, interp: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#estdif""" + raise NotImplementedError + + def exposure(self, exposure: float = None, black: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#exposure""" + raise NotImplementedError + + def extractplanes(self, planes: str = None) -> 
List[FilterableStream]: + """https://ffmpeg.org/ffmpeg-filters.html#extractplanes""" + raise NotImplementedError + + def fade(self, t: str = None, start_frame: int = None, nb_frames: int = None, + alpha: int = None, start_time: int = None, duration: int = None, + color: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fade""" + raise NotImplementedError + + def fftdnoiz(self, sigma: int = None, amount: int = None, block: int = None, + overlap: float = None, prev: int = None, next: int = None, + planes: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fftdnoiz""" + raise NotImplementedError + + def fftfilt(self, dc_Y: int = None, dc_U: int = None, dc_V: int = None, + weight_Y: str = None, weight_U: str = None, weight_V: str = None, + eval: str = None, X: int = None, Y: int = None, W: int = None, + H: int = None, N: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fftfilt""" + raise NotImplementedError + + def field(self, t: Union[int, str] = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#field""" + raise NotImplementedError + + def fieldhint(self, hint: Union[str, Path], mode: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fieldhint""" + raise NotImplementedError + + def fieldmatch(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fieldmatch""" + raise NotImplementedError + + def fieldorder(self, order: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fieldorder""" + raise NotImplementedError + + def fifo(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fifo_002c-afifo""" + raise NotImplementedError + + def afifo(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fifo_002c-afifo""" + raise NotImplementedError + + def fillborders(self, left: int = None, right: int = None, top: int = None, + bottom: int = None, mode: str = None, + color: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fillborders""" + raise NotImplementedError + + def find_rect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#find_rect""" + raise NotImplementedError + + def floodfill(self, x: int = None, y: int = None, s0: str = None, s1: str = None, + s2: str = None, s3: str = None, d0: str = None, d1: str = None, + d2: str = None, d3: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#floodfill""" + raise NotImplementedError + + def format(self, *pix_fmt: str) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#format""" + raise NotImplementedError + + def fps(self, fps: int = None, start_time: int = None, round: str = None, + eof_action: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fps""" + raise NotImplementedError + + def framepack(self, *streams: Stream, format: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framepack""" + raise NotImplementedError + + def framerate(self, fps: int = None, interp_start: int = None, interp_end: int = None, + scene: int = None, flags: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framerate""" + raise NotImplementedError + + def framestep(self, step: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#framestep""" + raise NotImplementedError + + def freezedetect(self, noise: Union[str, float], duration: int) -> FilterableStream: 
+ """https://ffmpeg.org/ffmpeg-filters.html#freezedetect""" + raise NotImplementedError + + def freezeframes(self, first: int = None, last: int = None, + replace: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#freezeframes""" + raise NotImplementedError + + def frei0r(self, filter_name: str, filter_params: str) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#frei0r""" + raise NotImplementedError + + def fspp(self, quality: int = None, qp: int = None, strength: int = None, + use_bframe_qp: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#fspp""" + raise NotImplementedError + + def gblur(self, sigma: float = None, steps: int = None, planes: int = None, + sigma_v: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gblur""" + raise NotImplementedError + + def geq(self, lum_expr: str = None, cb_expr: str = None, cr_expr: str = None, + alpha_expr: str = None, red_expr: str = None, green_expr: str = None, + blue_expr: str = None, lum: str = None, cb: str = None, cr: str = None, + r: str = None, g: str = None, b: str = None, a: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#geq""" + raise NotImplementedError + + def gradfun(self, strength: float = None, radius: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gradfun""" + raise NotImplementedError + + def graphmonitor(self, size: str = None, opacity: float = None, mode: str = None, + flags: str = None, rate: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#graphmonitor""" + raise NotImplementedError + + def greyedge(self, difford: float = None, minknorm: int = None, + sigma: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#greyedge""" + raise NotImplementedError + + def haldclut(self, shortest: bool = None, repeatlast: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#haldclut""" + raise NotImplementedError + + def hflip(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hflip""" + raise NotImplementedError + + def histeq(self, strength: float = None, intensity: float = None, + antibanding: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#histeq""" + raise NotImplementedError + + def histogram(self, level_height: int = None, scale_height: int = None, + display_mode: str = None, levels_mode: str = None, + components: int = None, fgopacity: float = None, + bgopacity: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#histogram""" + raise NotImplementedError + + def hqdn3d(self, luma_spatial: float = None, chroma_spatial: float = None, + luma_tmp: float = None, chroma_tmp: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hqdn3d""" + raise NotImplementedError + + def hwdownload(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwdownload""" + raise NotImplementedError + + def hwmap(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwmap""" + raise NotImplementedError + + def hwupload(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwupload""" + raise NotImplementedError + + def hwupload_cuda(self, device: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hwupload_cuda""" + raise NotImplementedError + + def hqx(self, n: int = None) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#hqx""" + raise NotImplementedError + + def hstack(self, *streams: Stream, inputs: int = None, + shortest: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hstack""" + raise NotImplementedError + + def hue(self, h: Union[str, int] = None, s: Union[str, int] = None, + H: Union[str, int] = None, b: Union[str, int] = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hue""" + raise NotImplementedError + + def hysteresis(self, planes: int = None, threshold: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#hysteresis""" + raise NotImplementedError + + def identity(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#identity""" + raise NotImplementedError + + def idet(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#idet""" + raise NotImplementedError + + def il(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#il""" + raise NotImplementedError + + def inflate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#inflate""" + raise NotImplementedError + + def interlace(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#interlace""" + raise NotImplementedError + + def kerndeint(self, thresh: int = None, map: int = None, order: int = None, + sharp: int = None, twoway: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#kerndeint""" + raise NotImplementedError + + def kirsch(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#kirsch""" + raise NotImplementedError + + def lagfun(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lagfun""" + raise NotImplementedError + + def lenscorrection(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lenscorrection""" + raise NotImplementedError + + def lensfun(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lensfun""" + raise NotImplementedError + + def libvmaf(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#libvmaf""" + raise NotImplementedError + + def limiter(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#limiter""" + raise NotImplementedError + + def loop(self, loop: int = None, size: int = None, + start: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#loop""" + raise NotImplementedError + + def lut1d(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lut1d""" + raise NotImplementedError + + def lut3d(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lut3d""" + raise NotImplementedError + + def lumakey(self, threshold: float = None, tolerance: float = None, + softness: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lumakey""" + raise NotImplementedError + + def lutyuv(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#lutyuv""" + raise NotImplementedError + + def tlut2(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tlut2""" + raise NotImplementedError + + def maskedclamp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedclamp""" + raise NotImplementedError 
+ + def maskedmax(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmax""" + raise NotImplementedError + + def maskedmerge(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmerge""" + raise NotImplementedError + + def maskedmin(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedmin""" + raise NotImplementedError + + def maskedthreshold(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskedthreshold""" + raise NotImplementedError + + def maskfun(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#maskfun""" + raise NotImplementedError + + def mcdeint(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mcdeint""" + raise NotImplementedError + + def median(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#median""" + raise NotImplementedError + + def mergeplanes(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mergeplanes""" + raise NotImplementedError + + def mestimate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mestimate""" + raise NotImplementedError + + def midequalizer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#midequalizer""" + raise NotImplementedError + + def minterpolate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#minterpolate""" + raise NotImplementedError + + def mix(self, *streams: Stream, inputs: int = None, weights: str = None, + scale: str = None, duration: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mix""" + raise NotImplementedError + + def monochrome(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#monochrome""" + raise NotImplementedError + + def mpdecimate(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mpdecimate""" + raise NotImplementedError + + def negate(self, negate_alpha: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#negate""" + raise NotImplementedError + + def nlmeans(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nlmeans""" + raise NotImplementedError + + def nnedi(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nnedi""" + raise NotImplementedError + + def noformat(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#noformat""" + raise NotImplementedError + + def noise(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#noise""" + raise NotImplementedError + + def normalize(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#normalize""" + raise NotImplementedError + + def null(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#null""" + raise NotImplementedError + + def ocr(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ocr""" + raise NotImplementedError + + def ocv(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ocv""" + raise NotImplementedError + + def oscilloscope(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#oscilloscope""" + 
raise NotImplementedError + + def overlay(self, overlay_node: Node, x: Union[int, str] = 0, y: Union[int, str] = 0, + eof_action: str = None, eval: str = None, shortest: bool = None, + format: str = None, repeatlast: bool = None, alpha: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay""" + raise NotImplementedError + + def overlay_cuda(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay_cuda""" + raise NotImplementedError + + def owdenoise(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#owdenoise""" + raise NotImplementedError + + def pad(self, w: Union[str, int] = None, h: Union[str, int] = None, + x: Union[str, int] = None, y: Union[str, int] = None, color: str = None, + eval: str = None, aspect: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pad""" + raise NotImplementedError + + def palettegen(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#palettegen""" + raise NotImplementedError + + def paletteuse(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#paletteuse""" + raise NotImplementedError + + def perspective(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#perspective""" + raise NotImplementedError + + def phase(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#phase""" + raise NotImplementedError + + def photosensitivity(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#photosensitivity""" + raise NotImplementedError + + def pixdesctest(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pixdesctest""" + raise NotImplementedError + + def pixscope(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pixscope""" + raise NotImplementedError + + def pp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pp""" + raise NotImplementedError + + def pp7(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pp7""" + raise NotImplementedError + + def premultiply(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#premultiply""" + raise NotImplementedError + + def prewitt(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#prewitt""" + raise NotImplementedError + + def pseudocolor(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pseudocolor""" + raise NotImplementedError + + def psnr(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#psnr""" + raise NotImplementedError + + def pullup(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pullup""" + raise NotImplementedError + + def qp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#qp""" + raise NotImplementedError + + def random(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#random""" + raise NotImplementedError + + def readeia608(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#readeia608""" + raise NotImplementedError + + def readvitc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#readvitc""" + raise NotImplementedError + + def 
remap(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#remap""" + raise NotImplementedError + + def removegrain(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#removegrain""" + raise NotImplementedError + + def removelogo(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#removelogo""" + raise NotImplementedError + + def repeatfields(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#repeatfields""" + raise NotImplementedError + + def reverse(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#reverse""" + raise NotImplementedError + + def rgbashift(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rgbashift""" + raise NotImplementedError + + def roberts(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#roberts""" + raise NotImplementedError + + def rotate(self, angle: str = None, ow: str = None, oh: str = None, + bilinear: bool = None, fillcolor: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#rotate""" + raise NotImplementedError + + def sab(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sab""" + raise NotImplementedError + + def scale(self, w: int = -1, h: int = -1, eval: str = None, interl: int = None, + flags: str = None, size: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scale""" + raise NotImplementedError + + def scale_npp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scale_npp""" + raise NotImplementedError + + def scale2ref(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scale2ref""" + raise NotImplementedError + + def scroll(self, h: float = None, v: float = None, hpos: float = None, + vpos: float = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scroll""" + raise NotImplementedError + + def scdet(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#scdet""" + raise NotImplementedError + + def selectivecolor(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#selectivecolor""" + raise NotImplementedError + + def separatefields(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#separatefields""" + raise NotImplementedError + + def setsar(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setsar""" + raise NotImplementedError + + def setfield(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setfield""" + raise NotImplementedError + + def setparams(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setparams""" + raise NotImplementedError + + def shear(self, shx: float = None, shy: float = None, fillcolor: str = None, + interp: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shear""" + raise NotImplementedError + + def showinfo(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showinfo""" + raise NotImplementedError + + def showpalette(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showpalette""" + raise NotImplementedError + + def shuffleframes(self, *args, **kwargs) -> FilterableStream: + 
"""https://ffmpeg.org/ffmpeg-filters.html#shuffleframes""" + raise NotImplementedError + + def shufflepixels(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shufflepixels""" + raise NotImplementedError + + def shuffleplanes(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#shuffleplanes""" + raise NotImplementedError + + def signalstats(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#signalstats""" + raise NotImplementedError + + def signature(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#signature""" + raise NotImplementedError + + def smartblur(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#smartblur""" + raise NotImplementedError + + def sobel(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sobel""" + raise NotImplementedError + + def spp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#spp""" + raise NotImplementedError + + def sr(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sr""" + raise NotImplementedError + + def ssim(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ssim""" + raise NotImplementedError + + def stereo3d(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#stereo3d""" + raise NotImplementedError + + def astreamselect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#astreamselect""" + raise NotImplementedError + + def subtitles(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#subtitles""" + raise NotImplementedError + + def super2xsai(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#super2xsai""" + raise NotImplementedError + + def swaprect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#swaprect""" + raise NotImplementedError + + def swapuv(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#swapuv""" + raise NotImplementedError + + def tblend(self, all_mode: str = None, all_opacity: float = None, + all_expr: str = None, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tblend""" + raise NotImplementedError + + def telecine(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#telecine""" + raise NotImplementedError + + def thistogram(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#thistogram""" + raise NotImplementedError + + def threshold(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#threshold""" + raise NotImplementedError + + def thumbnail(self, n: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#thumbnail""" + raise NotImplementedError + + def tile(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tile""" + raise NotImplementedError + + def tinterlace(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tinterlace""" + raise NotImplementedError + + def tmedian(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmedian""" + raise NotImplementedError + + def tmidequalizer(self, *args, **kwargs) -> 
FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmidequalizer""" + raise NotImplementedError + + def tmix(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tmix""" + raise NotImplementedError + + def tonemap(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap""" + raise NotImplementedError + + def tpad(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tpad""" + raise NotImplementedError + + def transpose(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#transpose""" + raise NotImplementedError + + def transpose_npp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#transpose_npp""" + raise NotImplementedError + + def trim(self, start: float = None, end: float = None, start_pts: int = None, + end_pts: int = None, duration: float = None, start_frame: int = None, + end_frame: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#trim""" + raise NotImplementedError + + def unpremultiply(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unpremultiply""" + raise NotImplementedError + + def unsharp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unsharp""" + raise NotImplementedError + + def untile(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#untile""" + raise NotImplementedError + + def uspp(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#uspp""" + raise NotImplementedError + + def v360(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#v360""" + raise NotImplementedError + + def vaguedenoiser(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vaguedenoiser""" + raise NotImplementedError + + def vectorscope(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vectorscope""" + raise NotImplementedError + + def vidstabdetect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vidstabdetect""" + raise NotImplementedError + + def vidstabtransform(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vidstabtransform""" + raise NotImplementedError + + def vflip(self) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vflip""" + raise NotImplementedError + + def vfrdet(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vfrdet""" + raise NotImplementedError + + def vibrance(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vibrance""" + raise NotImplementedError + + def vif(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vif""" + raise NotImplementedError + + def vignette(self, angle="PI/5", *args, **kwargs) -> FilterableStream: + """Make or reverse a natural vignetting effect. 
+ + https://ffmpeg.org/ffmpeg-filters.html#vignette + """ + raise NotImplementedError + + def vmafmotion(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vmafmotion""" + raise NotImplementedError + + def vstack(self, *streams: Stream, inputs: int = None, + shortest: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#vstack""" + raise NotImplementedError + + def w3fdif(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#w3fdif""" + raise NotImplementedError + + def waveform(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#waveform""" + raise NotImplementedError + + def doubleweave(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#doubleweave""" + raise NotImplementedError + + def xbr(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xbr""" + raise NotImplementedError + + def xfade(self, transition: str = None, duration: float = None, + offset: float = None, expr: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xfade""" + raise NotImplementedError + + def xmedian(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xmedian""" + raise NotImplementedError + + def xstack(self, *streams: Stream, inputs: int = None, layout: str = None, + shortest: int = None, fill: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xstack""" + raise NotImplementedError + + def yadif(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yadif""" + raise NotImplementedError + + def yadif_cuda(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yadif_cuda""" + raise NotImplementedError + + def yaepblur(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yaepblur""" + raise NotImplementedError + + def zoompan(self, z: int = None, zoom: int = None, x: Union[str, int] = None, + y: Union[str, int] = None, d: int = None, s: str = None, + fps: int = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#zoompan""" + raise NotImplementedError + + def zscale(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#zscale""" + raise NotImplementedError + + # OpenCL Video Filters + def avgblur_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avgblur_opencl""" + raise NotImplementedError + + def boxblur_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#boxblur_opencl""" + raise NotImplementedError + + def colorkey_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#colorkey_opencl""" + raise NotImplementedError + + def convolution_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#convolution_opencl""" + raise NotImplementedError + + def erosion_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#erosion_opencl""" + raise NotImplementedError + + def deshake_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#deshake_opencl""" + raise NotImplementedError + + def dilation_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#dilation_opencl""" + raise NotImplementedError + + def 
nlmeans_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nlmeans_opencl""" + raise NotImplementedError + + def overlay_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#overlay_opencl""" + raise NotImplementedError + + def pad_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#pad_opencl""" + raise NotImplementedError + + def prewitt_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#prewitt_opencl""" + raise NotImplementedError + + def program_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#program_opencl""" + raise NotImplementedError + + def roberts_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#roberts_opencl""" + raise NotImplementedError + + def sobel_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sobel_opencl""" + raise NotImplementedError + + def tonemap_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap_opencl""" + raise NotImplementedError + + def unsharp_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#unsharp_opencl""" + raise NotImplementedError + + def xfade_opencl(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#xfade_opencl""" + raise NotImplementedError + + def tonemap_vaapi(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#tonemap_vaapi""" + raise NotImplementedError + + # Video Sources + def buffer(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#buffer""" + raise NotImplementedError + + def cellauto(self, f: Union[str, Path] = None, p: str = None, r: int = None, + ratio: float = None, seed: int = None, rule: int = None, s: str = None, + scroll: bool = None, full: bool = None, stitch: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#cellauto""" + raise NotImplementedError + + def coreimagesrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#coreimagesrc""" + raise NotImplementedError + + def gradients(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#gradients""" + raise NotImplementedError + + def mandelbrot(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mandelbrot""" + raise NotImplementedError + + def mptestsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#mptestsrc""" + raise NotImplementedError + + def frei0r_src(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#frei0r_src""" + raise NotImplementedError + + def life(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#life""" + raise NotImplementedError + + def yuvtestsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#yuvtestsrc""" + raise NotImplementedError + + def openclsrc(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#openclsrc""" + raise NotImplementedError + + def sierpinski(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#sierpinski""" + raise NotImplementedError + + def buffersink(self, *args, **kwargs) -> 
FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#buffersink""" + raise NotImplementedError + + def nullsink(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#nullsink""" + raise NotImplementedError + + def abitscope(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abitscope""" + raise NotImplementedError + + def adrawgraph(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#adrawgraph""" + raise NotImplementedError + + def agraphmonitor(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#agraphmonitor""" + raise NotImplementedError + + def ahistogram(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ahistogram""" + raise NotImplementedError + + def aphasemeter(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aphasemeter""" + raise NotImplementedError + + def avectorscope(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#avectorscope""" + raise NotImplementedError + + def abench(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#abench""" + raise NotImplementedError + + def concat(self, *streams: Stream, n: int = None, v: int = None, a: int = None, + unsafe: bool = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#concat""" + raise NotImplementedError + + def ebur128(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ebur128""" + raise NotImplementedError + + def ainterleave(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ainterleave""" + raise NotImplementedError + + def ametadata(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#ametadata""" + raise NotImplementedError + + def aperms(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aperms""" + raise NotImplementedError + + def arealtime(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#arealtime""" + raise NotImplementedError + + def aselect(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#aselect""" + raise NotImplementedError + + def asendcmd(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asendcmd""" + raise NotImplementedError + + def setpts(self, expr: str = "PTS-STARTPTS") -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setpts""" + raise NotImplementedError + + def asetpts(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asetpts""" + raise NotImplementedError + + def setrange(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#setrange""" + raise NotImplementedError + + def asettb(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asettb""" + raise NotImplementedError + + def showcqt(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showcqt""" + raise NotImplementedError + + def showfreqs(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showfreqs""" + raise NotImplementedError + + def showspatial(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showspatial""" + raise NotImplementedError + + def 
showspectrum(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showspectrum""" + raise NotImplementedError + + def showspectrumpic(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showspectrumpic""" + raise NotImplementedError + + def showvolume(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showvolume""" + raise NotImplementedError + + def showwaves(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showwaves""" + raise NotImplementedError + + def showwavespic(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#showwavespic""" + raise NotImplementedError + + def asidedata(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asidedata""" + raise NotImplementedError + + def spectrumsynth(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#spectrumsynth""" + raise NotImplementedError + + def asplit(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#asplit""" + raise NotImplementedError + + def azmq(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#azmq""" + raise NotImplementedError + + def amovie(self, *args, **kwargs) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#amovie""" + raise NotImplementedError + + def movie(self, filename: Union[str, Path] = None, format_name: str = None, + seek_point: float = None, streams: str = None, stream_index: int = None, + loop: int = None, discontinuity: str = None) -> FilterableStream: + """https://ffmpeg.org/ffmpeg-filters.html#movie""" + raise NotImplementedError + + # TODO + def split(self) -> List[FilterableStream]: + raise NotImplementedError + + def select(self, expr: str) -> FilterableStream: + raise NotImplementedError + + +class InputNode(Node): + + def __init__(self, args=None, kwargs=None): + super().__init__(NodeTypes.Input, None, {}, FilterableStream, + node_type=NodeTypes.Input, args=args, kwargs=kwargs) + + self._source = kwargs.get('source') + if not self._source: + raise ValueError(f'Unsupported: {self}') + + def get_input_args(self) -> List[str]: + kwargs = copy.copy(self._kwargs) + source = kwargs.pop('source') + return convert_kwargs_to_cmd_line_args(kwargs) + ['-i', source] + + @property + def source(self) -> str: + return self._source + + @property + def brief(self) -> str: + return os.path.basename(self.source) + + +class OutputNode(Node): + + def __init__(self, streams: List[Stream], args=None, kwargs=None): + super().__init__(NodeTypes.Output, streams, FilterableStream, OutputStream, + min_inputs=1, node_type=NodeTypes.Output, + args=args, kwargs=kwargs) + + self._source = kwargs.get('source') + if not self._source: + raise ValueError(f'Unsupported: {self}') + + def get_output_args(self, stream_tag_graph: Dict[Tuple[DagNode, str], str]) -> List[str]: + if len(self.incoming_edges) == 0: + raise ValueError(f'{self} has no mapped streams') + + args = copy.copy(self._args) + kwargs = copy.copy(self._kwargs) + + source = kwargs.pop('source') + + for edge in self.incoming_edges: + stream_tag = format_input_stream_tag(stream_tag_graph, edge, is_final=True) + if stream_tag != '0' or len(self.incoming_edges) > 1: + args += ['-map', stream_tag] + + return convert_kwargs_to_cmd_line_args(kwargs) + args + [source] + + @property + def source(self) -> str: + return self._source + + @property + 
def brief(self) -> str:
+        return os.path.basename(self.source)
+
+
+class FilterNode(Node):
+
+    def __init__(self, streams: List[Stream], label: str, min_inputs=1,
+                 max_inputs=1, args=None, kwargs=None):
+        super().__init__(label, streams, FilterableStream, FilterableStream,
+                         min_inputs=min_inputs, max_inputs=max_inputs,
+                         node_type=NodeTypes.Filter, args=args, kwargs=kwargs)
+
+    def get_filter_spec(self, outgoing_edges: Tuple[DagEdge, ...] = None) -> str:
+        args = self._args or []
+        kwargs = self._kwargs or {}
+
+        if self.Label in {'split', 'asplit'} and outgoing_edges:
+            # the output count must be rendered as a string, since all
+            # filter arguments are joined with ':' below
+            args = [str(len(outgoing_edges))]
+        else:
+            args = [escape(x, '\\\'=:') for x in args]
+
+        kwargs = {escape(k, '\\\'=:'): escape(v, '\\\'=:') for k, v in kwargs.items()}
+
+        args.extend([f'{key}={kwargs[key]}' for key in sorted(kwargs)])
+        if args:
+            params = escape(self.Label, '\\\'=:') + '=' + ':'.join(args)
+        else:
+            params = escape(self.Label, '\\\'=:')
+
+        return escape(params, '\\\'[],;')
+
+
+class GlobalNode(Node):
+
+    def __init__(self, stream: Stream, args=None, kwargs=None):
+        super().__init__(stream.Label, stream, OutputStream, OutputStream,
+                         min_inputs=1, max_inputs=1, node_type=NodeTypes.Global,
+                         args=args, kwargs=kwargs)
+
+    def get_global_args(self) -> List[str]:
+        return list(self._args)
+
+
+class MergeOutputsNode(Node):
+
+    def __init__(self, streams: List[Stream]):
+        super().__init__(None, streams, OutputStream, OutputStream,
+                         min_inputs=1, node_type='merge_outputs')
diff --git a/ffmpeg/settings.py b/ffmpeg/settings.py
new file mode 100644
index 0000000..f7b7abd
--- /dev/null
+++ b/ffmpeg/settings.py
@@ -0,0 +1,11 @@
+'''
+Date: 2021.04.24 23:34
+Description: Omit
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.24 23:34
+'''
+from .constants import H264_CUVID, H264_NVENC
+
+CUDA_ENABLE = True
+DEFAULT_ENCODER = H264_NVENC
+DEFAULT_DECODER = H264_CUVID
diff --git a/ffmpeg/tools/__init__.py b/ffmpeg/tools/__init__.py
new file mode 100644
index 0000000..4ef8c1b
--- /dev/null
+++ b/ffmpeg/tools/__init__.py
@@ -0,0 +1,15 @@
+'''
+Date: 2021.04.25 10:21
+Description: Omit
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 10:21
+'''
+from . import atools, iotools, vtools, avtools
+
+
+__all__ = [
+    'atools',
+    'avtools',
+    'iotools',
+    'vtools',
+]
diff --git a/ffmpeg/tools/atools.py b/ffmpeg/tools/atools.py
new file mode 100644
index 0000000..d3302e2
--- /dev/null
+++ b/ffmpeg/tools/atools.py
@@ -0,0 +1,49 @@
+'''
+Date: 2021.02.28 19:35:09
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 10:23:18
+'''
+import re
+import subprocess
+import sys
+from typing import List, Optional, Tuple
+
+from .._ffmpeg import input
+from ..constants import PCM_S16LE, S16LE
+
+__all__ = [
+    "convert_audio_to_raw_pcm",
+    "detect_silence",
+]
+
+
+def convert_audio_to_raw_pcm(src) -> bytes:
+    raw, _ = input(src).output("-", format=S16LE, acodec=PCM_S16LE,
+                               ac=1, ar="16k").run(capture_stdout=True)
+    return raw
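+
+
+# A minimal usage sketch, not part of the module: the returned bytes are raw
+# headerless 16 kHz mono s16le PCM, i.e. 2 bytes per sample ("speech.wav" is
+# an assumed local file, not shipped with the package).
+#
+#     pcm = convert_audio_to_raw_pcm("speech.wav")
+#     print(len(pcm) / (2 * 16000), "seconds decoded")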
+ """ + silence_start = re.compile(r'silence_start: ([0-9]+\.?[0-9]*)') + silence_end = re.compile(r'silence_end: ([0-9]+\.?[0-9]*)') + + args = input(src).silencedetect(noise, duration).output("-", format="null").compile() + process = subprocess.Popen(args, stderr=subprocess.PIPE) + + info = process.communicate()[1].decode("utf-8") + if process.returncode != 0: + sys.stderr.write(info) + return + + return list(zip(map(float, silence_start.findall(info)), map(float, silence_end.findall(info)))) diff --git a/ffmpeg/tools/avtools.py b/ffmpeg/tools/avtools.py new file mode 100644 index 0000000..ad9e88f --- /dev/null +++ b/ffmpeg/tools/avtools.py @@ -0,0 +1,152 @@ +''' +Date: 2021.02-25 20:50:07 +LastEditors: Rustle Karl +LastEditTime: 2021.04.25 10:24:18 +''' +import tempfile +from pathlib import Path +from typing import Dict, List, Optional, Union + +from .. import constants +from .._ffmpeg import input, merge_outputs +from .._utils import seconds_to_string + +__all__ = [ + "adjust_tempo", + "concat_multiple_parts", + "cut_into_multiple_parts", + "merge_video_audio", + "separate_audio_stream", + "separate_video_stream", +] + + +def adjust_tempo(src: Union[str, Path], dst: Union[str, Path], *, vtempo: float = 2, + atempo: float = 2, acodec=None, vcodec=None, **kwargs): + """Adjust audio and video playback speed. + + Args: + vtempo: video current playback speed * vtempo, -1 mean no video + atempo: audio current playback speed * atempo, -1 mean no audio + """ + _input = input(src) + + v_input = _input.video.setpts(f"{1 / vtempo}*PTS") + a_input = _input.audio.atempo(atempo) + + if vtempo == -1 and atempo == -1: + raise ValueError("`vtempo` and `atempo` cannot all be -1") + + elif vtempo == -1: + a_input.output(dst, acodec=acodec, **kwargs).run() + elif atempo == -1: + v_input.output(dst, vcodec=vcodec, **kwargs).run() + else: + v_input.output(a_input, dst, acodec=acodec, vcodec=vcodec, **kwargs).run() + + +def modify_metadata(src: Union[str, Path], dst: Union[str, Path], *, + metadata: Dict[str, Union[str, int]], specifier: Optional[str] = None): + """Set a metadata key/value pair. + + An optional specifier may be given to set metadata on streams, chapters or programs. + """ + if not metadata: + raise ValueError("Provide at least one metadata %s" % metadata) + + specifier = "-metadata:" + specifier if specifier else "-metadata" + + args = [] + for k, v in metadata.items(): + args.extend([specifier, f"{k}={v}"]) + + input(src).output(dst, vcodec=constants.COPY, acodec=constants.COPY, args=args).run() + + +def separate_video_stream(src: Union[str, Path], dst: Union[str, Path]): + input(src).output(dst, an=True, vcodec="copy").run() + + +def separate_audio_stream(src: Union[str, Path], dst: Union[str, Path]): + input(src).output(dst, vn=True, acodec="copy").run() + + +def convert_format(src: Union[str, Path], dst: Union[str, Path], *, + format=None, vcodec="copy", acodec="copy"): + input(src).output(dst, format=format, acodec=acodec, vcodec=vcodec).run() + + +def cut_into_multiple_parts(src: Union[str, Path], dst: Union[str, Path], + *, durations: List[float], vcodec="libx264"): + """Cut the video or audio into multiple parts. 
diff --git a/ffmpeg/tools/avtools.py b/ffmpeg/tools/avtools.py
new file mode 100644
index 0000000..ad9e88f
--- /dev/null
+++ b/ffmpeg/tools/avtools.py
@@ -0,0 +1,152 @@
+'''
+Date: 2021.02.25 20:50:07
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 10:24:18
+'''
+import tempfile
+from pathlib import Path
+from typing import Dict, List, Optional, Union
+
+from .. import constants
+from .._ffmpeg import input, merge_outputs
+from .._utils import seconds_to_string
+
+__all__ = [
+    "adjust_tempo",
+    "concat_multiple_parts",
+    "cut_into_multiple_parts",
+    "merge_video_audio",
+    "separate_audio_stream",
+    "separate_video_stream",
+]
+
+
+def adjust_tempo(src: Union[str, Path], dst: Union[str, Path], *, vtempo: float = 2,
+                 atempo: float = 2, acodec=None, vcodec=None, **kwargs):
+    """Adjust audio and video playback speed.
+
+    Args:
+        vtempo: the current video playback speed is multiplied by vtempo; -1 means drop the video stream
+        atempo: the current audio playback speed is multiplied by atempo; -1 means drop the audio stream
+    """
+    if vtempo == -1 and atempo == -1:
+        raise ValueError("`vtempo` and `atempo` cannot both be -1")
+
+    _input = input(src)
+
+    v_input = _input.video.setpts(f"{1 / vtempo}*PTS")
+    a_input = _input.audio.atempo(atempo)
+
+    if vtempo == -1:
+        a_input.output(dst, acodec=acodec, **kwargs).run()
+    elif atempo == -1:
+        v_input.output(dst, vcodec=vcodec, **kwargs).run()
+    else:
+        v_input.output(a_input, dst, acodec=acodec, vcodec=vcodec, **kwargs).run()
+
+
+def modify_metadata(src: Union[str, Path], dst: Union[str, Path], *,
+                    metadata: Dict[str, Union[str, int]], specifier: Optional[str] = None):
+    """Set a metadata key/value pair.
+
+    An optional specifier may be given to set metadata on streams, chapters or programs.
+    """
+    if not metadata:
+        raise ValueError("Provide at least one metadata key/value pair, got %s" % metadata)
+
+    specifier = "-metadata:" + specifier if specifier else "-metadata"
+
+    args = []
+    for k, v in metadata.items():
+        args.extend([specifier, f"{k}={v}"])
+
+    input(src).output(dst, vcodec=constants.COPY, acodec=constants.COPY, args=args).run()
+
+
+def separate_video_stream(src: Union[str, Path], dst: Union[str, Path]):
+    input(src).output(dst, an=True, vcodec="copy").run()
+
+
+def separate_audio_stream(src: Union[str, Path], dst: Union[str, Path]):
+    input(src).output(dst, vn=True, acodec="copy").run()
+
+
+def convert_format(src: Union[str, Path], dst: Union[str, Path], *,
+                   format=None, vcodec="copy", acodec="copy"):
+    input(src).output(dst, format=format, acodec=acodec, vcodec=vcodec).run()
+
+
+def cut_into_multiple_parts(src: Union[str, Path], dst: Union[str, Path],
+                            *, durations: List[float], vcodec="libx264"):
+    """Cut the video or audio into multiple parts.
+
+    A negative duration skips that stretch of the input; None means "until the end".
+
+    Example:
+        avtools.cut_into_multiple_parts("video.mp4", "parts", durations=[10, 10, 10, None])
+        avtools.cut_into_multiple_parts("music.mp3", "parts", durations=[-10, 10, -10, None])
+    """
+    if not isinstance(durations, (list, tuple)):
+        raise ValueError("`durations` must be a list or tuple, got %r" % durations)
+
+    if len(durations) < 2:
+        raise ValueError(f'Expected at least 2 duration values; got {len(durations)}')
+
+    outs = []
+    raw = input(src)
+    path = Path(src)
+    dst = Path(dst)  # accept str as well as Path
+    start_position = 0
+
+    for order, duration in enumerate(durations):
+        # a negative duration means: skip this stretch of the input
+        if duration is not None and duration < 0:
+            start_position -= duration
+            continue
+
+        outs.append(raw.output(f"{dst / path.stem}_part{order}{path.suffix}",
+                               acodec="copy", vcodec=vcodec,
+                               start_position=seconds_to_string(start_position),
+                               duration=duration))
+
+        if duration is not None:
+            start_position += duration
+
+    merge_outputs(*outs).run()
+
+
+def cut_one_part(src: Union[str, Path], dst: Union[str, Path], *, vcodec="libx264",
+                 start: Union[str, int, float] = None, end: Union[str, int, float] = None,
+                 duration: Union[int, float] = None, only_video=False, only_audio=False):
+    '''Cut one clip out of an audio or video file.'''
+    if isinstance(start, (int, float)) and isinstance(end, (int, float)):
+        if (end == 0 or end < start) and duration is not None:
+            end = start + duration
+
+    av = input(src)
+    a = av.audio.atrim(start=start, end=end, duration=duration)
+    v = av.video.trim(start=start, end=end, duration=duration)
+
+    if only_video:
+        v.output(dst, vcodec=vcodec).run()
+    elif only_audio:
+        a.output(dst).run()  # audio-only output; a video codec does not apply here
+    else:
+        v.output(a, dst, vcodec=vcodec).run()
+
+
+def merge_video_audio(v_src: Union[str, Path], a_src: Union[str, Path],
+                      dst: Union[str, Path], vcodec="copy", acodec="copy"):
+    v_input = input(v_src).video
+    a_input = input(a_src).audio
+    v_input.output(a_input, dst, acodec=acodec, vcodec=vcodec).run()
+
+
+def concat_multiple_parts(dst: Union[str, Path], *files: Union[str, Path],
+                          vcodec="copy", acodec="copy"):
+    concat = tempfile.mktemp()
+
+    with open(concat, "w", encoding="utf-8") as fp:
+        for file in files:
+            fp.write("file '%s'\n" % Path(file).absolute().as_posix())
+
+    # https://stackoverflow.com/questions/38996925/ffmpeg-concat-unsafe-file-name/56029574
+    input(concat, format="concat", safe=0).output(dst, acodec=acodec, vcodec=vcodec).run()
+
+    Path(concat).unlink(missing_ok=True)
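+
+
+# A minimal usage sketch, not part of the module ("video.mp4" and the "parts"
+# directory are assumptions): split a clip, then stitch the pieces back.
+#
+#     cut_into_multiple_parts("video.mp4", "parts", durations=[10, 10, None])
+#     concat_multiple_parts("rejoined.mp4",
+#                           "parts/video_part0.mp4",
+#                           "parts/video_part1.mp4",
+#                           "parts/video_part2.mp4")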
diff --git a/ffmpeg/tools/iotools.py b/ffmpeg/tools/iotools.py
new file mode 100644
index 0000000..636d3f7
--- /dev/null
+++ b/ffmpeg/tools/iotools.py
@@ -0,0 +1,75 @@
+'''
+Date: 2021.03.06 17:33:38
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 10:24:34
+'''
+import signal
+from pathlib import Path
+from typing import Optional, Union
+
+import psutil
+
+from .._ffmpeg import input
+from ..constants import LINUX, WINDOWS
+
+
+def record_screen_windows(dst: Union[str, Path], *, area="desktop", duration=None,
+                          frame_rate=30, offset_x=0, offset_y=0, video_size="vga",
+                          output_vcodec="libx264", output_acodec="libfaac",
+                          output_format="flv", run=True, **output_kwargs):
+    """https://ffmpeg.org/ffmpeg-all.html#gdigrab"""
+    command = input(area, format="gdigrab", frame_rate=frame_rate, offset_x=offset_x,
+                    offset_y=offset_y, video_size=video_size, duration=duration). \
+        output(dst, vcodec=output_vcodec, acodec=output_acodec,
+               format=output_format, **output_kwargs)
+    if run:
+        command.run(capture_stdout=False, capture_stderr=False)
+    else:
+        return command
+
+
+class ScreenRecorder(object):
+
+    def __init__(self, dst: Union[str, Path], *, area="desktop",
+                 frame_rate=30, offset_x=0, offset_y=0, video_size="vga",
+                 duration=None, output_vcodec="libx264", output_acodec="libfaac",
+                 output_format="flv", **output_kwargs):
+
+        if WINDOWS:
+            self.command = record_screen_windows(
+                dst, area=area, frame_rate=frame_rate, duration=duration,
+                offset_x=offset_x, offset_y=offset_y, video_size=video_size,
+                output_vcodec=output_vcodec, output_acodec=output_acodec,
+                output_format=output_format, run=False, **output_kwargs
+            )
+        elif LINUX:
+            raise NotImplementedError
+        else:
+            raise NotImplementedError
+
+        self.proc: Optional[psutil.Process] = None
+        self.paused = False
+
+    def start(self):
+        if self.proc is None:
+            _proc = self.command.run_async(quiet=True)
+            self.proc = psutil.Process(_proc.pid)
+        elif self.paused:
+            self.proc.resume()
+            self.paused = False
+
+    def pause(self):
+        if self.proc is None or self.paused:
+            return
+        else:
+            self.proc.suspend()
+            self.paused = True
+
+    def resume(self):
+        self.start()
+
+    def stop(self):
+        if self.proc is None:
+            return
+
+        # CTRL_C_EVENT is Windows-only, matching the WINDOWS check in __init__
+        self.proc.send_signal(signal.CTRL_C_EVENT)
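+
+
+# A minimal usage sketch, not part of the module (Windows only; "capture.flv"
+# is an assumed output path): record, pause, resume, then stop.
+#
+#     recorder = ScreenRecorder("capture.flv", video_size="hd720")
+#     recorder.start()
+#     recorder.pause()
+#     recorder.resume()
+#     recorder.stop()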
diff --git a/ffmpeg/tools/rtmp.py b/ffmpeg/tools/rtmp.py
new file mode 100644
index 0000000..3ca697e
--- /dev/null
+++ b/ffmpeg/tools/rtmp.py
@@ -0,0 +1,23 @@
+'''
+Date: 2021.03.05 09:36:19
+LastEditors: Rustle Karl
+LastEditTime: 2021.03.05 12:26:09
+'''
+from pathlib import Path
+from typing import Union
+
+from .. import FFprobe
+from .._ffmpeg import input
+
+
+def start_one_stream_loop(src: Union[str, Path], *, loop: int = -1, codec="copy",
+                          vcodec="copy", acodec="copy", format="mpegts",
+                          source_url: str = "udp://localhost:10240"):
+    input(src, stream_loop=loop, re=None) \
+        .output(source_url, codec=codec, vcodec=vcodec,
+                acodec=acodec, format=format). \
+        run(capture_stdout=False, capture_stderr=False)
+
+
+def detect_source_stream(source_url: str, timeout: int = 3) -> dict:
+    return FFprobe(source_url, timeout=timeout).metadata
diff --git a/ffmpeg/tools/vtools.py b/ffmpeg/tools/vtools.py
new file mode 100644
index 0000000..a276578
--- /dev/null
+++ b/ffmpeg/tools/vtools.py
@@ -0,0 +1,166 @@
+'''
+Date: 2021.03.01 19:46:08
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 13:07:29
+'''
+import os
+from pathlib import Path
+from typing import Union
+
+import numpy as np
+
+from .. import vfilters
+from .._ffmpeg import input
+from .._ffprobe import FFprobe
+from ..constants import PIPE, RAW_VIDEO, RGB24
+
+__all__ = [
+    "assemble_video_from_images",
+    "compare_2_videos",
+    "convert_video_to_np_array",
+    "generate_video_thumbnail",
+    "hstack_videos",
+    "read_frame_as_jpeg",
+    "side_by_side_2_videos",
+    "timed_video_screenshot",
+    "video_add_image_watermark",
+    "video_add_text_watermark",
+    "vstack_videos",
+    "xstack_videos",
+]
+
+
+# TODO
+def capture_x11_screen(dst: Union[str, Path], *, screen: str = None,
+                       duration: int = 3, frame_rate: int = 25):
+    screen = os.environ["DISPLAY"] if screen is None else screen
+
+    input(screen, duration=duration, format="x11grab",
+          video_size="cif", framerate=frame_rate).output(dst).run()
+
+
+def capture_video_key_frame(src: Union[str, Path], dst: Union[str, Path]):
+    if not os.path.isdir(dst):
+        dst = os.path.dirname(dst)
+
+    input(src).output(os.path.join(dst, Path(src).stem + "_key_frame_%d.png"),
+                      video_filter="select='eq(pict_type,PICT_TYPE_I)'", vsync="vfr").run()
+
+
+def timed_video_screenshot(src: Union[str, Path], dst: Union[str, Path], interval=3):
+    os.makedirs(dst, exist_ok=True)
+    input(src).output(os.path.join(dst, Path(src).stem + "_screenshot_%d.png"),
+                      video_filter=f"fps=1/{interval}").run()
+
+
+def flip_mirror_video(src: Union[str, Path], dst: Union[str, Path], *,
+                      horizontal=True, keep_audio=True, hwaccel: str = None,
+                      output_vcodec: str = None, **output_kwargs):
+    input_v = input(src, hwaccel=hwaccel)
+
+    if horizontal:
+        stream = input_v.pad(w="2*iw").overlay(input_v.hflip(), x="w")
+    else:
+        stream = input_v.pad(h="2*ih").overlay(input_v.vflip(), y="h")
+
+    if keep_audio:
+        stream.output(input_v.audio, dst, acodec="copy",
+                      vcodec=output_vcodec, **output_kwargs).run()
+    else:
+        stream.output(dst, vcodec=output_vcodec, **output_kwargs).run()
+
+
+def compare_2_videos(v1: Union[str, Path], v2: Union[str, Path],
+                     dst: Union[str, Path], horizontal=True):
+    if horizontal:
+        hstack_videos(dst, v1, v2)  # Fastest
+        # input(v1).pad(w="2*iw").overlay(input(v2), x="w").output(dst).run()
+    else:
+        vstack_videos(dst, v1, v2)  # Fastest
+        # side_by_side_2_videos(v1, v2, dst, False)  # Fast
+        # input(v1).pad(h="2*ih").overlay(input(v2), y="h").output(dst).run()  # Slowest
+
+
+def side_by_side_2_videos(v1: Union[str, Path], v2: Union[str, Path],
+                          dst: Union[str, Path], horizontal=True):
+    vfilters.framepack(input(v1), input(v2), format="sbs" if horizontal else "tab").output(dst).run()
+
+
+def hstack_videos(dst: Union[str, Path], *videos: Union[str, Path]):
+    vfilters.hstack(*list(map(input, videos)), inputs=len(videos), shortest=0).output(dst).run()
+
+
+def vstack_videos(dst: Union[str, Path], *videos: Union[str, Path]):
+    vfilters.vstack(*list(map(input, videos)), inputs=len(videos), shortest=0).output(dst).run()
+
+
+def xstack_videos(*videos: Union[str, Path], dst: Union[str, Path], layout: str, fill: str = None):
+    vfilters.xstack(*list(map(input, videos)), inputs=len(videos),
+                    layout=layout, shortest=0, fill=fill).output(dst).run()
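+
+
+# A minimal usage sketch, not part of the module: a 2x2 grid using the
+# standard FFmpeg xstack layout syntax (the four input files are assumptions).
+#
+#     xstack_videos("a.mp4", "b.mp4", "c.mp4", "d.mp4",
+#                   dst="grid.mp4", layout="0_0|w0_0|0_h0|w0_h0")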
+
+
+def concat_2_videos_with_gltransition(dst: Union[str, Path], *videos: Union[str, Path],
+                                      offset: float = 0, duration: float = 0, source: Union[str, Path] = None):
+    if len(videos) < 2:
+        raise ValueError(f'Expected at least 2 videos; got {len(videos)}')
+
+    in1, in2 = input(videos[0]), input(videos[1])
+    vfilters.gltransition(in1, in2, offset=offset, duration=duration,
+                          source=source).output(dst).run()
+
+
+def concat_2_videos_with_xfade(dst: Union[str, Path], *videos: Union[str, Path],
+                               transition: str = None, duration: float = None,
+                               offset: float = None, expr: str = None,
+                               hwaccel: str = None, output_vcodec: str = None):
+    if len(videos) < 2:
+        raise ValueError(f'Expected at least 2 videos; got {len(videos)}')
+
+    in1, in2 = input(videos[0], hwaccel=hwaccel), input(videos[1], hwaccel=hwaccel)
+    vfilters.xfade(in1, in2, transition=transition, duration=duration, offset=offset, expr=expr). \
+        output(dst, vcodec=output_vcodec).run()
+
+
+def video_add_image_watermark(v_src: Union[str, Path], i_src: Union[str, Path],
+                              dst: Union[str, Path], *, w: int = 0, h: int = 0,
+                              x: int = 0, y: int = 0, _eval='init'):
+    v_input = input(v_src)
+    i_input = input(i_src).scale(w, h)
+    v_input.overlay(i_input, x=x, y=y, eval=_eval).output(v_input.audio, dst, acodec="copy").run()
+
+
+def video_add_text_watermark(v_src, dst, *, text: str, x: int = 0, y: int = 0,
+                             fontsize: int = 24, fontfile: Union[str, Path] = None,
+                             keep_audio=True):
+    v_input = input(v_src)
+    stream = v_input.drawtext(text=text, x=x, y=y, fontsize=fontsize, fontfile=fontfile)
+
+    if keep_audio:
+        stream.output(v_input.audio, dst, acodec="copy").run()
+    else:
+        stream.output(dst).run()
+
+
+def assemble_video_from_images(glob_pattern, dst, *, pattern_type="glob", frame_rate=25):
+    # https://stackoverflow.com/questions/31201164/ffmpeg-error-pattern-type-glob-was-selected-but-globbing-is-not-support-ed-by
+    if pattern_type:
+        input(glob_pattern, frame_rate=frame_rate, pattern_type=pattern_type).output(dst).run()
+    else:
+        input(glob_pattern, frame_rate=frame_rate).output(dst).run()
+
+
+def convert_video_to_np_array(src, *, width=0, height=0) -> np.ndarray:
+    width_, height_ = FFprobe(src).video_scale
+    stdout, _ = input(src, enable_cuda=False). \
+        output(PIPE, format=RAW_VIDEO, pixel_format=RGB24, enable_cuda=False). \
+        run(capture_stdout=True)  # capture stdout, otherwise no frame bytes come back
+    return np.frombuffer(stdout, np.uint8).reshape([-1, height or height_, width or width_, 3])
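+
+
+# A minimal usage sketch, not part of the module: the result is indexed as
+# [frame, row, column, rgb] ("video.mp4" is an assumed local file).
+#
+#     frames = convert_video_to_np_array("video.mp4")
+#     print(frames.shape)  # e.g. (250, 720, 1280, 3) for a 10 s, 25 fps, 720p clip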
+
+
+def read_frame_as_jpeg(src, frame=1) -> bytes:
+    raw, _ = input(src, enable_cuda=False).select(f"gte(n, {frame})"). \
+        output(PIPE, vframes=1, format='image2', vcodec='mjpeg', enable_cuda=False). \
+        run(capture_stdout=True)
+    return raw
+
+
+def generate_video_thumbnail(src, dst, *, start_position=1, width=-1, height=-1):
+    input(src, start_position=start_position).scale(width, height).output(dst, vframes=1).run()
diff --git a/ffmpeg/transitions/__init__.py b/ffmpeg/transitions/__init__.py
new file mode 100644
index 0000000..6a6c6e7
--- /dev/null
+++ b/ffmpeg/transitions/__init__.py
@@ -0,0 +1,16 @@
+'''
+Date: 2021.04.25 20:19:14
+LastEditors: Rustle Karl
+LastEditTime: 2021.04.25 20:20:09
+'''
+from ._gltransition import All as GLTransitionAll
+from ._gltransition import GLTransition
+from ._xfade import All as XFadeAll
+from ._xfade import XFade
+
+__all__ = [
+    "GLTransition",
+    "GLTransitionAll",
+    "XFade",
+    "XFadeAll",
+]
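+
+# A minimal usage sketch, not part of the module (pairs with
+# ffmpeg.tools.vtools; the mp4 file names are assumptions, not shipped files).
+#
+#     from ffmpeg.tools import vtools
+#     from ffmpeg.transitions import GLTransition, XFade
+#
+#     vtools.concat_2_videos_with_gltransition(
+#         "out_gl.mp4", "a.mp4", "b.mp4",
+#         offset=1.0, duration=0.8, source=GLTransition.CrossWarp)
+#
+#     vtools.concat_2_videos_with_xfade(
+#         "out_xfade.mp4", "a.mp4", "b.mp4",
+#         transition=XFade.Circleopen, duration=1, offset=3)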
"squareswire.glsl" + Squeeze = _dst / "squeeze.glsl" + StereoViewer = _dst / "StereoViewer.glsl" + Swap = _dst / "swap.glsl" + Swirl = _dst / "Swirl.glsl" + TangentMotionBlur = _dst / "tangentMotionBlur.glsl" + TopBottom = _dst / "TopBottom.glsl" + WaterDrop = _dst / "WaterDrop.glsl" + Wind = _dst / "wind.glsl" + WindowBlinds = _dst / "windowblinds.glsl" + WindowSlice = _dst / "windowslice.glsl" + WipeDown = _dst / "wipeDown.glsl" + WipeLeft = _dst / "wipeLeft.glsl" + WipeRight = _dst / "wipeRight.glsl" + WipeUp = _dst / "wipeUp.glsl" + ZoomInCircles = _dst / "ZoomInCircles.glsl" + + +All = [v for k, v in vars(GLTransition).items() if not k.endswith("__")] + +if __name__ == '__main__': + print(All) diff --git a/ffmpeg/transitions/_xfade.py b/ffmpeg/transitions/_xfade.py new file mode 100644 index 0000000..6763b93 --- /dev/null +++ b/ffmpeg/transitions/_xfade.py @@ -0,0 +1,63 @@ +''' +Date: 2021.03.07 21:50:15 +LastEditors: Rustle Karl +LastEditTime: 2021.03.07 21:55:27 +''' + + +class XFade(object): + """Apply cross fade from one input video stream to another input video stream. + The cross fade is applied for specified duration. + + https://ffmpeg.org/ffmpeg-filters.html#xfade + """ + Circleclose = "circleclose" + Circlecrop = "circlecrop" + Circleopen = "circleopen" + Custom = "custom" + Diagbl = "diagbl" + Diagbr = "diagbr" + Diagtl = "diagtl" + Diagtr = "diagtr" + Dissolve = "dissolve" + Distance = "distance" + Fade = "fade" + Fadeblack = "fadeblack" + Fadegrays = "fadegrays" + Fadewhite = "fadewhite" + Hblur = "hblur" + Hlslice = "hlslice" + Horzopen = "horzopen" + Hrslice = "hrslice" + Pixelize = "pixelize" + Radial = "radial" + Rectcrop = "rectcrop" + Slidedown = "slidedown" + Slideleft = "slideleft" + Slideright = "slideright" + Slideup = "slideup" + Smoothdown = "smoothdown" + Smoothleft = "smoothleft" + Smoothright = "smoothright" + Smoothup = "smoothup" + Squeezeh = "squeezeh" + Squeezev = "squeezev" + Vdslice = "vdslice" + Vertclose = "vertclose" + Vertopen = "vertopen" + Vuslice = "vuslice" + Wipebl = "wipebl" + Wipebr = "wipebr" + Wipedown = "wipedown" + Wipeleft = "wipeleft" + Wiperight = "wiperight" + Wipetl = "wipetl" + Wipetr = "wipetr" + Wipeup = "wipeup" + Horzclose = "horzclose" + + +All = [v for k, v in vars(XFade).items() if not k.endswith("__")] + +if __name__ == '__main__': + print(All) diff --git a/ffmpeg/transitions/glsl/Bounce.glsl b/ffmpeg/transitions/glsl/Bounce.glsl new file mode 100644 index 0000000..7a6a161 --- /dev/null +++ b/ffmpeg/transitions/glsl/Bounce.glsl @@ -0,0 +1,29 @@ +// Author: Adrian Purser +// License: MIT + +uniform vec4 shadow_colour; // = vec4(0.,0.,0.,.6) +uniform float shadow_height; // = 0.075 +uniform float bounces; // = 3.0 + +const float PI = 3.14159265358; + +vec4 transition (vec2 uv) { + float time = progress; + float stime = sin(time * PI / 2.); + float phase = time * PI * bounces; + float y = (abs(cos(phase))) * (1.0 - stime); + float d = uv.y - y; + return mix( + mix( + getToColor(uv), + shadow_colour, + step(d, shadow_height) * (1. 
- mix( + ((d / shadow_height) * shadow_colour.a) + (1.0 - shadow_colour.a), + 1.0, + smoothstep(0.95, 1., progress) // fade-out the shadow at the end + )) + ), + getFromColor(vec2(uv.x, uv.y + (1.0 - y))), + step(d, 0.0) + ); +} diff --git a/ffmpeg/transitions/glsl/BowTieHorizontal.glsl b/ffmpeg/transitions/glsl/BowTieHorizontal.glsl new file mode 100644 index 0000000..6a5d562 --- /dev/null +++ b/ffmpeg/transitions/glsl/BowTieHorizontal.glsl @@ -0,0 +1,120 @@ +// Author: huynx +// License: MIT + +vec2 bottom_left = vec2(0.0, 1.0); +vec2 bottom_right = vec2(1.0, 1.0); +vec2 top_left = vec2(0.0, 0.0); +vec2 top_right = vec2(1.0, 0.0); +vec2 center = vec2(0.5, 0.5); + +float check(vec2 p1, vec2 p2, vec2 p3) +{ + return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); +} + +bool PointInTriangle (vec2 pt, vec2 p1, vec2 p2, vec2 p3) +{ + bool b1, b2, b3; + b1 = check(pt, p1, p2) < 0.0; + b2 = check(pt, p2, p3) < 0.0; + b3 = check(pt, p3, p1) < 0.0; + return ((b1 == b2) && (b2 == b3)); +} + +bool in_left_triangle(vec2 p){ + vec2 vertex1, vertex2, vertex3; + vertex1 = vec2(progress, 0.5); + vertex2 = vec2(0.0, 0.5-progress); + vertex3 = vec2(0.0, 0.5+progress); + if (PointInTriangle(p, vertex1, vertex2, vertex3)) + { + return true; + } + return false; +} + +bool in_right_triangle(vec2 p){ + vec2 vertex1, vertex2, vertex3; + vertex1 = vec2(1.0-progress, 0.5); + vertex2 = vec2(1.0, 0.5-progress); + vertex3 = vec2(1.0, 0.5+progress); + if (PointInTriangle(p, vertex1, vertex2, vertex3)) + { + return true; + } + return false; +} + +float blur_edge(vec2 bot1, vec2 bot2, vec2 top, vec2 testPt) +{ + vec2 lineDir = bot1 - top; + vec2 perpDir = vec2(lineDir.y, -lineDir.x); + vec2 dirToPt1 = bot1 - testPt; + float dist1 = abs(dot(normalize(perpDir), dirToPt1)); + + lineDir = bot2 - top; + perpDir = vec2(lineDir.y, -lineDir.x); + dirToPt1 = bot2 - testPt; + float min_dist = min(abs(dot(normalize(perpDir), dirToPt1)), dist1); + + if (min_dist < 0.005) { + return min_dist / 0.005; + } + else { + return 1.0; + }; +} + + +vec4 transition (vec2 uv) { + if (in_left_triangle(uv)) + { + if (progress < 0.1) + { + return getFromColor(uv); + } + if (uv.x < 0.5) + { + vec2 vertex1 = vec2(progress, 0.5); + vec2 vertex2 = vec2(0.0, 0.5-progress); + vec2 vertex3 = vec2(0.0, 0.5+progress); + return mix( + getFromColor(uv), + getToColor(uv), + blur_edge(vertex2, vertex3, vertex1, uv) + ); + } + else + { + if (progress > 0.0) + { + return getToColor(uv); + } + else + { + return getFromColor(uv); + } + } + } + else if (in_right_triangle(uv)) + { + if (uv.x >= 0.5) + { + vec2 vertex1 = vec2(1.0-progress, 0.5); + vec2 vertex2 = vec2(1.0, 0.5-progress); + vec2 vertex3 = vec2(1.0, 0.5+progress); + return mix( + getFromColor(uv), + getToColor(uv), + blur_edge(vertex2, vertex3, vertex1, uv) + ); + } + else + { + return getFromColor(uv); + } + } + else { + return getFromColor(uv); + } +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/BowTieVertical.glsl b/ffmpeg/transitions/glsl/BowTieVertical.glsl new file mode 100644 index 0000000..b6ca7d8 --- /dev/null +++ b/ffmpeg/transitions/glsl/BowTieVertical.glsl @@ -0,0 +1,114 @@ +// Author: huynx +// License: MIT + +float check(vec2 p1, vec2 p2, vec2 p3) +{ + return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); +} + +bool PointInTriangle (vec2 pt, vec2 p1, vec2 p2, vec2 p3) +{ + bool b1, b2, b3; + b1 = check(pt, p1, p2) < 0.0; + b2 = check(pt, p2, p3) < 0.0; + b3 = check(pt, p3, p1) < 0.0; + return ((b1 == b2) && (b2 == b3)); +} + +bool 
in_top_triangle(vec2 p){ + vec2 vertex1, vertex2, vertex3; + vertex1 = vec2(0.5, progress); + vertex2 = vec2(0.5-progress, 0.0); + vertex3 = vec2(0.5+progress, 0.0); + if (PointInTriangle(p, vertex1, vertex2, vertex3)) + { + return true; + } + return false; +} + +bool in_bottom_triangle(vec2 p){ + vec2 vertex1, vertex2, vertex3; + vertex1 = vec2(0.5, 1.0 - progress); + vertex2 = vec2(0.5-progress, 1.0); + vertex3 = vec2(0.5+progress, 1.0); + if (PointInTriangle(p, vertex1, vertex2, vertex3)) + { + return true; + } + return false; +} + +float blur_edge(vec2 bot1, vec2 bot2, vec2 top, vec2 testPt) +{ + vec2 lineDir = bot1 - top; + vec2 perpDir = vec2(lineDir.y, -lineDir.x); + vec2 dirToPt1 = bot1 - testPt; + float dist1 = abs(dot(normalize(perpDir), dirToPt1)); + + lineDir = bot2 - top; + perpDir = vec2(lineDir.y, -lineDir.x); + dirToPt1 = bot2 - testPt; + float min_dist = min(abs(dot(normalize(perpDir), dirToPt1)), dist1); + + if (min_dist < 0.005) { + return min_dist / 0.005; + } + else { + return 1.0; + }; +} + + +vec4 transition (vec2 uv) { + if (in_top_triangle(uv)) + { + if (progress < 0.1) + { + return getFromColor(uv); + } + if (uv.y < 0.5) + { + vec2 vertex1 = vec2(0.5, progress); + vec2 vertex2 = vec2(0.5-progress, 0.0); + vec2 vertex3 = vec2(0.5+progress, 0.0); + return mix( + getFromColor(uv), + getToColor(uv), + blur_edge(vertex2, vertex3, vertex1, uv) + ); + } + else + { + if (progress > 0.0) + { + return getToColor(uv); + } + else + { + return getFromColor(uv); + } + } + } + else if (in_bottom_triangle(uv)) + { + if (uv.y >= 0.5) + { + vec2 vertex1 = vec2(0.5, 1.0-progress); + vec2 vertex2 = vec2(0.5-progress, 1.0); + vec2 vertex3 = vec2(0.5+progress, 1.0); + return mix( + getFromColor(uv), + getToColor(uv), + blur_edge(vertex2, vertex3, vertex1, uv) + ); + } + else + { + return getFromColor(uv); + } + } + else { + return getFromColor(uv); + } +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/BowTieWithParameter.glsl b/ffmpeg/transitions/glsl/BowTieWithParameter.glsl new file mode 100644 index 0000000..19ebf8e --- /dev/null +++ b/ffmpeg/transitions/glsl/BowTieWithParameter.glsl @@ -0,0 +1,70 @@ +// Author:KMojek +// License: MIT + +uniform float adjust; // = 0.5; +uniform bool reverse; // = false; + +float check(vec2 p1, vec2 p2, vec2 p3) +{ + return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y); +} + +bool pointInTriangle(vec2 pt, vec2 p1, vec2 p2, vec2 p3) +{ + + bool b1 = check(pt, p1, p2) < 0.0; + bool b2 = check(pt, p2, p3) < 0.0; + bool b3 = check(pt, p3, p1) < 0.0; + return b1 == b2 && b2 == b3; +} + +const float height = 0.5; + +vec4 transition_firstHalf( vec2 uv, float prog ) +{ + if ( uv.y < 0.5 ) + { + vec2 botLeft = vec2( -0., prog-height ); + vec2 botRight = vec2( 1., prog-height ); + vec2 tip = vec2( adjust, prog ); + if ( pointInTriangle( uv, botLeft, botRight, tip ) ) + return getToColor(uv); + } + else + { + vec2 topLeft = vec2( -0., 1.-prog+height ); + vec2 topRight = vec2( 1., 1.-prog+height ); + vec2 tip = vec2( adjust, 1.-prog ); + if ( pointInTriangle( uv, topLeft, topRight, tip ) ) + return getToColor( uv ); + } + return getFromColor( uv ); +} + +vec4 transition_secondHalf( vec2 uv, float prog ) +{ + if ( uv.x > adjust ) + { + vec2 top = vec2( prog + height, 1. ); + vec2 bot = vec2( prog + height, -0. ); + vec2 tip = vec2( mix( adjust, 1.0, 2.0 * (prog - 0.5) ), 0.5 ); + if ( pointInTriangle( uv, top, bot, tip) ) + return getFromColor( uv ); + } + else + { + vec2 top = vec2( 1.0-prog - height, 1. 
); + vec2 bot = vec2( 1.0-prog - height, -0. ); + vec2 tip = vec2( mix( adjust, 0.0, 2.0 * (prog - 0.5) ), 0.5 ); + if ( pointInTriangle( uv, top, bot, tip) ) + return getFromColor( uv ); + } + return getToColor( uv ); +} + +vec4 transition (vec2 uv) { + if ( reverse ) + return ( progress < 0.5 ) ? transition_secondHalf( uv, 1.-progress ) : transition_firstHalf( uv, 1.-progress ); + else + return ( progress < 0.5 ) ? transition_firstHalf( uv, progress ) : transition_secondHalf( uv, progress ); +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/ButterflyWaveScrawler.glsl b/ffmpeg/transitions/glsl/ButterflyWaveScrawler.glsl new file mode 100644 index 0000000..d7ba52a --- /dev/null +++ b/ffmpeg/transitions/glsl/ButterflyWaveScrawler.glsl @@ -0,0 +1,28 @@ +// Author: mandubian +// License: MIT +uniform float amplitude; // = 1.0 +uniform float waves; // = 30.0 +uniform float colorSeparation; // = 0.3 +float PI = 3.14159265358979323846264; +float compute(vec2 p, float progress, vec2 center) { +vec2 o = p*sin(progress * amplitude)-center; +// horizontal vector +vec2 h = vec2(1., 0.); +// butterfly polar function (don't ask me why this one :)) +float theta = acos(dot(o, h)) * waves; +return (exp(cos(theta)) - 2.*cos(4.*theta) + pow(sin((2.*theta - PI) / 24.), 5.)) / 10.; +} +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy; + float inv = 1. - progress; + vec2 dir = p - vec2(.5); + float dist = length(dir); + float disp = compute(p, progress, vec2(0.5, 0.5)) ; + vec4 texTo = getToColor(p + inv*disp); + vec4 texFrom = vec4( + getFromColor(p + progress*disp*(1.0 - colorSeparation)).r, + getFromColor(p + progress*disp).g, + getFromColor(p + progress*disp*(1.0 + colorSeparation)).b, + 1.0); + return texTo*progress + texFrom*inv; +} diff --git a/ffmpeg/transitions/glsl/CircleCrop.glsl b/ffmpeg/transitions/glsl/CircleCrop.glsl new file mode 100644 index 0000000..af68fad --- /dev/null +++ b/ffmpeg/transitions/glsl/CircleCrop.glsl @@ -0,0 +1,17 @@ +// License: MIT +// Author: fkuteken +// ported by gre from https://gist.github.com/fkuteken/f63e3009c1143950dee9063c3b83fb88 + +uniform vec4 bgcolor; // = vec4(0.0, 0.0, 0.0, 1.0) + +vec2 ratio2 = vec2(1.0, 1.0 / ratio); +float s = pow(2.0 * abs(progress - 0.5), 3.0); + +vec4 transition(vec2 p) { + float dist = length((vec2(p) - 0.5) * ratio2); + return mix( + progress < 0.5 ? 
getFromColor(p) : getToColor(p), // branching is ok here as we statically depend on progress uniform (branching won't change over pixels) + bgcolor, + step(s, dist) + ); +} diff --git a/ffmpeg/transitions/glsl/ColourDistance.glsl b/ffmpeg/transitions/glsl/ColourDistance.glsl new file mode 100644 index 0000000..a3ee68c --- /dev/null +++ b/ffmpeg/transitions/glsl/ColourDistance.glsl @@ -0,0 +1,16 @@ +// License: MIT +// Author: P-Seebauer +// ported by gre from https://gist.github.com/P-Seebauer/2a5fa2f77c883dd661f9 + +uniform float power; // = 5.0 + +vec4 transition(vec2 p) { + vec4 fTex = getFromColor(p); + vec4 tTex = getToColor(p); + float m = step(distance(fTex, tTex), progress); + return mix( + mix(fTex, tTex, m), + tTex, + pow(progress, power) + ); +} diff --git a/ffmpeg/transitions/glsl/CrazyParametricFun.glsl b/ffmpeg/transitions/glsl/CrazyParametricFun.glsl new file mode 100644 index 0000000..a3dab61 --- /dev/null +++ b/ffmpeg/transitions/glsl/CrazyParametricFun.glsl @@ -0,0 +1,17 @@ +// Author: mandubian +// License: MIT + +uniform float a; // = 4 +uniform float b; // = 1 +uniform float amplitude; // = 120 +uniform float smoothness; // = 0.1 + +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy; + vec2 dir = p - vec2(.5); + float dist = length(dir); + float x = (a - b) * cos(progress) + b * cos(progress * ((a / b) - 1.) ); + float y = (a - b) * sin(progress) - b * sin(progress * ((a / b) - 1.)); + vec2 offset = dir * vec2(sin(progress * dist * amplitude * x), sin(progress * dist * amplitude * y)) / smoothness; + return mix(getFromColor(p + offset), getToColor(p), smoothstep(0.2, 1.0, progress)); +} diff --git a/ffmpeg/transitions/glsl/CrossZoom.glsl b/ffmpeg/transitions/glsl/CrossZoom.glsl new file mode 100644 index 0000000..498cf9c --- /dev/null +++ b/ffmpeg/transitions/glsl/CrossZoom.glsl @@ -0,0 +1,64 @@ +// License: MIT +// Author: rectalogic +// ported by gre from https://gist.github.com/rectalogic/b86b90161503a0023231 + +// Converted from https://github.com/rectalogic/rendermix-basic-effects/blob/master/assets/com/rendermix/CrossZoom/CrossZoom.frag +// Which is based on https://github.com/evanw/glfx.js/blob/master/src/filters/blur/zoomblur.js +// With additional easing functions from https://github.com/rectalogic/rendermix-basic-effects/blob/master/assets/com/rendermix/Easing/Easing.glsllib + +uniform float strength; // = 0.4 + +const float PI = 3.141592653589793; + +float Linear_ease(in float begin, in float change, in float duration, in float time) { + return change * time / duration + begin; +} + +float Exponential_easeInOut(in float begin, in float change, in float duration, in float time) { + if (time == 0.0) + return begin; + else if (time == duration) + return begin + change; + time = time / (duration / 2.0); + if (time < 1.0) + return change / 2.0 * pow(2.0, 10.0 * (time - 1.0)) + begin; + return change / 2.0 * (-pow(2.0, -10.0 * (time - 1.0)) + 2.0) + begin; +} + +float Sinusoidal_easeInOut(in float begin, in float change, in float duration, in float time) { + return -change / 2.0 * (cos(PI * time / duration) - 1.0) + begin; +} + +float rand (vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + +vec3 crossFade(in vec2 uv, in float dissolve) { + return mix(getFromColor(uv).rgb, getToColor(uv).rgb, dissolve); +} + +vec4 transition(vec2 uv) { + vec2 texCoord = uv.xy / vec2(1.0).xy; + + // Linear interpolate center across center half of the image + vec2 center = vec2(Linear_ease(0.25, 0.5, 1.0, progress), 0.5); + float dissolve = 
Exponential_easeInOut(0.0, 1.0, 1.0, progress); + + // Mirrored sinusoidal loop. 0->strength then strength->0 + float strength = Sinusoidal_easeInOut(0.0, strength, 0.5, progress); + + vec3 color = vec3(0.0); + float total = 0.0; + vec2 toCenter = center - texCoord; + + /* randomize the lookup values to hide the fixed number of samples */ + float offset = rand(uv); + + for (float t = 0.0; t <= 40.0; t++) { + float percent = (t + offset) / 40.0; + float weight = 4.0 * (percent - percent * percent); + color += crossFade(texCoord + toCenter * percent * strength, dissolve) * weight; + total += weight; + } + return vec4(color / total, 1.0); +} diff --git a/ffmpeg/transitions/glsl/Directional.glsl b/ffmpeg/transitions/glsl/Directional.glsl new file mode 100644 index 0000000..5f57f74 --- /dev/null +++ b/ffmpeg/transitions/glsl/Directional.glsl @@ -0,0 +1,14 @@ +// Author: Gaëtan Renaudeau +// License: MIT + +uniform vec2 direction; // = vec2(0.0, 1.0) + +vec4 transition (vec2 uv) { + vec2 p = uv + progress * sign(direction); + vec2 f = fract(p); + return mix( + getToColor(f), + getFromColor(f), + step(0.0, p.y) * step(p.y, 1.0) * step(0.0, p.x) * step(p.x, 1.0) + ); +} diff --git a/ffmpeg/transitions/glsl/DoomScreenTransition.glsl b/ffmpeg/transitions/glsl/DoomScreenTransition.glsl new file mode 100644 index 0000000..01e06da --- /dev/null +++ b/ffmpeg/transitions/glsl/DoomScreenTransition.glsl @@ -0,0 +1,59 @@ +// Author: Zeh Fernando +// License: MIT + + +// Transition parameters -------- + +// Number of total bars/columns +uniform int bars; // = 30 + +// Multiplier for speed ratio. 0 = no variation when going down, higher = some elements go much faster +uniform float amplitude; // = 2 + +// Further variations in speed. 0 = no noise, 1 = super noisy (ignore frequency) +uniform float noise; // = 0.1 + +// Speed variation horizontally. the bigger the value, the shorter the waves +uniform float frequency; // = 0.5 + +// How much the bars seem to "run" from the middle of the screen first (sticking to the sides). 0 = no drip, 1 = curved drip +uniform float dripScale; // = 0.5 + + +// The code proper -------- + +float rand(int num) { + return fract(mod(float(num) * 67123.313, 12.0) * sin(float(num) * 10.3) * cos(float(num))); +} + +float wave(int num) { + float fn = float(num) * frequency * 0.1 * float(bars); + return cos(fn * 0.5) * cos(fn * 0.13) * sin((fn+10.0) * 0.3) / 2.0 + 0.5; +} + +float drip(int num) { + return sin(float(num) / float(bars - 1) * 3.141592) * dripScale; +} + +float pos(int num) { + return (noise == 0.0 ? wave(num) : mix(wave(num), rand(num), noise)) + (dripScale == 0.0 ? 
0.0 : drip(num)); +} + +vec4 transition(vec2 uv) { + int bar = int(uv.x * (float(bars))); + float scale = 1.0 + pos(bar) * amplitude; + float phase = progress * scale; + float posY = uv.y / vec2(1.0).y; + vec2 p; + vec4 c; + if (phase + posY < 1.0) { + p = vec2(uv.x, uv.y + mix(0.0, vec2(1.0).y, phase)) / vec2(1.0).xy; + c = getFromColor(p); + } else { + p = uv.xy / vec2(1.0).xy; + c = getToColor(p); + } + + // Finally, apply the color + return c; +} diff --git a/ffmpeg/transitions/glsl/Dreamy.glsl b/ffmpeg/transitions/glsl/Dreamy.glsl new file mode 100644 index 0000000..389f221 --- /dev/null +++ b/ffmpeg/transitions/glsl/Dreamy.glsl @@ -0,0 +1,11 @@ +// Author: mikolalysenko +// License: MIT + +vec2 offset(float progress, float x, float theta) { + float phase = progress*progress + progress + theta; + float shifty = 0.03*progress*cos(10.0*(progress+x)); + return vec2(0, shifty); +} +vec4 transition(vec2 p) { + return mix(getFromColor(p + offset(progress, p.x, 0.0)), getToColor(p + offset(1.0-progress, p.x, 3.14)), progress); +} diff --git a/ffmpeg/transitions/glsl/DreamyZoom.glsl b/ffmpeg/transitions/glsl/DreamyZoom.glsl new file mode 100644 index 0000000..d035765 --- /dev/null +++ b/ffmpeg/transitions/glsl/DreamyZoom.glsl @@ -0,0 +1,40 @@ +// Author: Zeh Fernando +// License: MIT + +// Definitions -------- +#define DEG2RAD 0.03926990816987241548078304229099 // 1/180*PI + + +// Transition parameters -------- + +// In degrees +uniform float rotation; // = 6 + +// Multiplier +uniform float scale; // = 1.2 + + +// The code proper -------- + +vec4 transition(vec2 uv) { + // Massage parameters + float phase = progress < 0.5 ? progress * 2.0 : (progress - 0.5) * 2.0; + float angleOffset = progress < 0.5 ? mix(0.0, rotation * DEG2RAD, phase) : mix(-rotation * DEG2RAD, 0.0, phase); + float newScale = progress < 0.5 ? mix(1.0, scale, phase) : mix(scale, 1.0, phase); + + vec2 center = vec2(0, 0); + + // Calculate the source point + vec2 assumedCenter = vec2(0.5, 0.5); + vec2 p = (uv.xy - vec2(0.5, 0.5)) / newScale * vec2(ratio, 1.0); + + // This can probably be optimized (with distance()) + float angle = atan(p.y, p.x) + angleOffset; + float dist = distance(center, p); + p.x = cos(angle) * dist / ratio + 0.5; + p.y = sin(angle) * dist + 0.5; + vec4 c = progress < 0.5 ? getFromColor(p) : getToColor(p); + + // Finally, apply the color + return c + (progress < 0.5 ? 
mix(0.0, 1.0, phase) : mix(1.0, 0.0, phase)); +} diff --git a/ffmpeg/transitions/glsl/FilmBurn.glsl b/ffmpeg/transitions/glsl/FilmBurn.glsl new file mode 100644 index 0000000..7fbea49 --- /dev/null +++ b/ffmpeg/transitions/glsl/FilmBurn.glsl @@ -0,0 +1,59 @@ +// author: Anastasia Dunbar +// license: MIT +uniform float Seed; // = 2.31 +float sigmoid(float x, float a) { + float b = pow(x*2.,a)/2.; + if (x > .5) { + b = 1.-pow(2.-(x*2.),a)/2.; + } + return b; +} +float rand(float co){ + return fract(sin((co*24.9898)+Seed)*43758.5453); +} +float rand(vec2 co){ + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} +float apow(float a,float b) { return pow(abs(a),b)*sign(b); } +vec3 pow3(vec3 a,vec3 b) { return vec3(apow(a.r,b.r),apow(a.g,b.g),apow(a.b,b.b)); } +float smooth_mix(float a,float b,float c) { return mix(a,b,sigmoid(c,2.)); } +float random(vec2 co, float shft){ + co += 10.; + return smooth_mix(fract(sin(dot(co.xy ,vec2(12.9898+(floor(shft)*.5),78.233+Seed))) * 43758.5453),fract(sin(dot(co.xy ,vec2(12.9898+(floor(shft+1.)*.5),78.233+Seed))) * 43758.5453),fract(shft)); +} +float smooth_random(vec2 co, float shft) { + return smooth_mix(smooth_mix(random(floor(co),shft),random(floor(co+vec2(1.,0.)),shft),fract(co.x)),smooth_mix(random(floor(co+vec2(0.,1.)),shft),random(floor(co+vec2(1.,1.)),shft),fract(co.x)),fract(co.y)); +} +vec4 texture(vec2 p) { + return mix(getFromColor(p), getToColor(p), sigmoid(progress,10.)); +} +#define pi 3.14159265358979323 +#define clamps(x) clamp(x,0.,1.) + +vec4 transition(vec2 p) { + vec3 f = vec3(0.); + for (float i = 0.; i < 13.; i++) { + f += sin(((p.x*rand(i)*6.)+(progress*8.))+rand(i+1.43))*sin(((p.y*rand(i+4.4)*6.)+(progress*6.))+rand(i+2.4)); + f += 1.-clamps(length(p-vec2(smooth_random(vec2(progress*1.3),i+1.),smooth_random(vec2(progress*.5),i+6.25)))*mix(20.,70.,rand(i))); + } + f += 4.; + f /= 11.; + f = pow3(f*vec3(1.,0.7,0.6),vec3(1.,2.-sin(progress*pi),1.3)); + f *= sin(progress*pi); + + p -= .5; + p *= 1.+(smooth_random(vec2(progress*5.),6.3)*sin(progress*pi)*.05); + p += .5; + + vec4 blurred_image = vec4(0.); + float bluramount = sin(progress*pi)*.03; + #define repeats 50. 
+ for (float i = 0.; i < repeats; i++) { + vec2 q = vec2(cos(degrees((i/repeats)*360.)),sin(degrees((i/repeats)*360.))) * (rand(vec2(i,p.x+p.y))+bluramount); + vec2 uv2 = p+(q*bluramount); + blurred_image += texture(uv2); + } + blurred_image /= repeats; + + return blurred_image+vec4(f,0.); +} diff --git a/ffmpeg/transitions/glsl/GlitchDisplace.glsl b/ffmpeg/transitions/glsl/GlitchDisplace.glsl new file mode 100644 index 0000000..03f8b9b --- /dev/null +++ b/ffmpeg/transitions/glsl/GlitchDisplace.glsl @@ -0,0 +1,79 @@ +// Author: Matt DesLauriers +// License: MIT + +#ifdef GL_ES +precision highp float; +#endif + +float random(vec2 co) +{ + float a = 12.9898; + float b = 78.233; + float c = 43758.5453; + float dt= dot(co.xy ,vec2(a,b)); + float sn= mod(dt,3.14); + return fract(sin(sn) * c); +} +float voronoi( in vec2 x ) { + vec2 p = floor( x ); + vec2 f = fract( x ); + float res = 8.0; + for( float j=-1.; j<=1.; j++ ) + for( float i=-1.; i<=1.; i++ ) { + vec2 b = vec2( i, j ); + vec2 r = b - f + random( p + b ); + float d = dot( r, r ); + res = min( res, d ); + } + return sqrt( res ); +} + +vec2 displace(vec4 tex, vec2 texCoord, float dotDepth, float textureDepth, float strength) { + float b = voronoi(.003 * texCoord + 2.0); + float g = voronoi(0.2 * texCoord); + float r = voronoi(texCoord - 1.0); + vec4 dt = tex * 1.0; + vec4 dis = dt * dotDepth + 1.0 - tex * textureDepth; + + dis.x = dis.x - 1.0 + textureDepth*dotDepth; + dis.y = dis.y - 1.0 + textureDepth*dotDepth; + dis.x *= strength; + dis.y *= strength; + vec2 res_uv = texCoord ; + res_uv.x = res_uv.x + dis.x - 0.0; + res_uv.y = res_uv.y + dis.y; + return res_uv; +} + +float ease1(float t) { + return t == 0.0 || t == 1.0 + ? t + : t < 0.5 + ? +0.5 * pow(2.0, (20.0 * t) - 10.0) + : -0.5 * pow(2.0, 10.0 - (t * 20.0)) + 1.0; +} +float ease2(float t) { + return t == 1.0 ? t : 1.0 - pow(2.0, -10.0 * t); +} + + + +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy; + vec4 color1 = getFromColor(p); + vec4 color2 = getToColor(p); + vec2 disp = displace(color1, p, 0.33, 0.7, 1.0-ease1(progress)); + vec2 disp2 = displace(color2, p, 0.33, 0.5, ease2(progress)); + vec4 dColor1 = getToColor(disp); + vec4 dColor2 = getFromColor(disp2); + float val = ease1(progress); + vec3 gray = vec3(dot(min(dColor2, dColor1).rgb, vec3(0.299, 0.587, 0.114))); + dColor2 = vec4(gray, 1.0); + dColor2 *= 2.0; + color1 = mix(color1, dColor2, smoothstep(0.0, 0.5, progress)); + color2 = mix(color2, dColor1, smoothstep(1.0, 0.5, progress)); + return mix(color1, color2, val); + //gl_FragColor = mix(gl_FragColor, dColor, smoothstep(0.0, 0.5, progress)); + + //gl_FragColor = mix(texture2D(from, p), texture2D(to, p), progress); +} diff --git a/ffmpeg/transitions/glsl/GlitchMemories.glsl b/ffmpeg/transitions/glsl/GlitchMemories.glsl new file mode 100644 index 0000000..342d63a --- /dev/null +++ b/ffmpeg/transitions/glsl/GlitchMemories.glsl @@ -0,0 +1,15 @@ +// author: Gunnar Roth +// based on work from natewave +// license: MIT +vec4 transition(vec2 p) { + vec2 block = floor(p.xy / vec2(16)); + vec2 uv_noise = block / vec2(64); + uv_noise += floor(vec2(progress) * vec2(1200.0, 3500.0)) / vec2(64); + vec2 dist = progress > 0.0 ? 
(fract(uv_noise) - 0.5) * 0.3 *(1.0 -progress) : vec2(0.0); + vec2 red = p + dist * 0.2; + vec2 green = p + dist * .3; + vec2 blue = p + dist * .5; + + return vec4(mix(getFromColor(red), getToColor(red), progress).r,mix(getFromColor(green), getToColor(green), progress).g,mix(getFromColor(blue), getToColor(blue), progress).b,1.0); +} + diff --git a/ffmpeg/transitions/glsl/GridFlip.glsl b/ffmpeg/transitions/glsl/GridFlip.glsl new file mode 100644 index 0000000..365bb71 --- /dev/null +++ b/ffmpeg/transitions/glsl/GridFlip.glsl @@ -0,0 +1,72 @@ +// License: MIT +// Author: TimDonselaar +// ported by gre from https://gist.github.com/TimDonselaar/9bcd1c4b5934ba60087bdb55c2ea92e5 + +uniform ivec2 size; // = ivec2(4) +uniform float pause; // = 0.1 +uniform float dividerWidth; // = 0.05 +uniform vec4 bgcolor; // = vec4(0.0, 0.0, 0.0, 1.0) +uniform float randomness; // = 0.1 + +float rand (vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + +float getDelta(vec2 p) { + vec2 rectanglePos = floor(vec2(size) * p); + vec2 rectangleSize = vec2(1.0 / vec2(size).x, 1.0 / vec2(size).y); + float top = rectangleSize.y * (rectanglePos.y + 1.0); + float bottom = rectangleSize.y * rectanglePos.y; + float left = rectangleSize.x * rectanglePos.x; + float right = rectangleSize.x * (rectanglePos.x + 1.0); + float minX = min(abs(p.x - left), abs(p.x - right)); + float minY = min(abs(p.y - top), abs(p.y - bottom)); + return min(minX, minY); +} + +float getDividerSize() { + vec2 rectangleSize = vec2(1.0 / vec2(size).x, 1.0 / vec2(size).y); + return min(rectangleSize.x, rectangleSize.y) * dividerWidth; +} + +vec4 transition(vec2 p) { + if(progress < pause) { + float currentProg = progress / pause; + float a = 1.0; + if(getDelta(p) < getDividerSize()) { + a = 1.0 - currentProg; + } + return mix(bgcolor, getFromColor(p), a); + } + else if(progress < 1.0 - pause){ + if(getDelta(p) < getDividerSize()) { + return bgcolor; + } else { + float currentProg = (progress - pause) / (1.0 - pause * 2.0); + vec2 q = p; + vec2 rectanglePos = floor(vec2(size) * q); + + float r = rand(rectanglePos) - randomness; + float cp = smoothstep(0.0, 1.0 - r, currentProg); + + float rectangleSize = 1.0 / vec2(size).x; + float delta = rectanglePos.x * rectangleSize; + float offset = rectangleSize / 2.0 + delta; + + p.x = (p.x - offset)/abs(cp - 0.5)*0.5 + offset; + vec4 a = getFromColor(p); + vec4 b = getToColor(p); + + float s = step(abs(vec2(size).x * (q.x - delta) - 0.5), abs(cp - 0.5)); + return mix(bgcolor, mix(b, a, step(cp, 0.5)), s); + } + } + else { + float currentProg = (progress - 1.0 + pause) / pause; + float a = 1.0; + if(getDelta(p) < getDividerSize()) { + a = currentProg; + } + return mix(bgcolor, getToColor(p), a); + } +} diff --git a/ffmpeg/transitions/glsl/InvertedPageCurl.glsl b/ffmpeg/transitions/glsl/InvertedPageCurl.glsl new file mode 100644 index 0000000..bc2e9ec --- /dev/null +++ b/ffmpeg/transitions/glsl/InvertedPageCurl.glsl @@ -0,0 +1,214 @@ +// author: Hewlett-Packard +// license: BSD 3 Clause +// Adapted by Sergey Kosarevsky from: +// http://rectalogic.github.io/webvfx/examples_2transition-shader-pagecurl_8html-example.html + +/* +Copyright (c) 2010 Hewlett-Packard Development Company, L.P. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Hewlett-Packard nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +in vec2 texCoord; +*/ + +const float MIN_AMOUNT = -0.16; +const float MAX_AMOUNT = 1.5; +float amount = progress * (MAX_AMOUNT - MIN_AMOUNT) + MIN_AMOUNT; + +const float PI = 3.141592653589793; + +const float scale = 512.0; +const float sharpness = 3.0; + +float cylinderCenter = amount; +// 360 degrees * amount +float cylinderAngle = 2.0 * PI * amount; + +const float cylinderRadius = 1.0 / PI / 2.0; + +vec3 hitPoint(float hitAngle, float yc, vec3 point, mat3 rrotation) +{ + float hitPoint = hitAngle / (2.0 * PI); + point.y = hitPoint; + return rrotation * point; +} + +vec4 antiAlias(vec4 color1, vec4 color2, float distanc) +{ + distanc *= scale; + if (distanc < 0.0) return color2; + if (distanc > 2.0) return color1; + float dd = pow(1.0 - distanc / 2.0, sharpness); + return ((color2 - color1) * dd) + color1; +} + +float distanceToEdge(vec3 point) +{ + float dx = abs(point.x > 0.5 ? 1.0 - point.x : point.x); + float dy = abs(point.y > 0.5 ? 
1.0 - point.y : point.y); + if (point.x < 0.0) dx = -point.x; + if (point.x > 1.0) dx = point.x - 1.0; + if (point.y < 0.0) dy = -point.y; + if (point.y > 1.0) dy = point.y - 1.0; + if ((point.x < 0.0 || point.x > 1.0) && (point.y < 0.0 || point.y > 1.0)) return sqrt(dx * dx + dy * dy); + return min(dx, dy); +} + +vec4 seeThrough(float yc, vec2 p, mat3 rotation, mat3 rrotation) +{ + float hitAngle = PI - (acos(yc / cylinderRadius) - cylinderAngle); + vec3 point = hitPoint(hitAngle, yc, rotation * vec3(p, 1.0), rrotation); + if (yc <= 0.0 && (point.x < 0.0 || point.y < 0.0 || point.x > 1.0 || point.y > 1.0)) + { + return getToColor(p); + } + + if (yc > 0.0) return getFromColor(p); + + vec4 color = getFromColor(point.xy); + vec4 tcolor = vec4(0.0); + + return antiAlias(color, tcolor, distanceToEdge(point)); +} + +vec4 seeThroughWithShadow(float yc, vec2 p, vec3 point, mat3 rotation, mat3 rrotation) +{ + float shadow = distanceToEdge(point) * 30.0; + shadow = (1.0 - shadow) / 3.0; + + if (shadow < 0.0) shadow = 0.0; else shadow *= amount; + + vec4 shadowColor = seeThrough(yc, p, rotation, rrotation); + shadowColor.r -= shadow; + shadowColor.g -= shadow; + shadowColor.b -= shadow; + + return shadowColor; +} + +vec4 backside(float yc, vec3 point) +{ + vec4 color = getFromColor(point.xy); + float gray = (color.r + color.b + color.g) / 15.0; + gray += (8.0 / 10.0) * (pow(1.0 - abs(yc / cylinderRadius), 2.0 / 10.0) / 2.0 + (5.0 / 10.0)); + color.rgb = vec3(gray); + return color; +} + +vec4 behindSurface(vec2 p, float yc, vec3 point, mat3 rrotation) +{ + float shado = (1.0 - ((-cylinderRadius - yc) / amount * 7.0)) / 6.0; + shado *= 1.0 - abs(point.x - 0.5); + + yc = (-cylinderRadius - cylinderRadius - yc); + + float hitAngle = (acos(yc / cylinderRadius) + cylinderAngle) - PI; + point = hitPoint(hitAngle, yc, point, rrotation); + + if (yc < 0.0 && point.x >= 0.0 && point.y >= 0.0 && point.x <= 1.0 && point.y <= 1.0 && (hitAngle < PI || amount > 0.5)) + { + shado = 1.0 - (sqrt(pow(point.x - 0.5, 2.0) + pow(point.y - 0.5, 2.0)) / (71.0 / 100.0)); + shado *= pow(-yc / cylinderRadius, 3.0); + shado *= 0.5; + } + else + { + shado = 0.0; + } + return vec4(getToColor(p).rgb - shado, 1.0); +} + +vec4 transition(vec2 p) { + + const float angle = 100.0 * PI / 180.0; + float c = cos(-angle); + float s = sin(-angle); + + mat3 rotation = mat3( c, s, 0, + -s, c, 0, + -0.801, 0.8900, 1 + ); + c = cos(angle); + s = sin(angle); + + mat3 rrotation = mat3( c, s, 0, + -s, c, 0, + 0.98500, 0.985, 1 + ); + + vec3 point = rotation * vec3(p, 1.0); + + float yc = point.y - cylinderCenter; + + if (yc < -cylinderRadius) + { + // Behind surface + return behindSurface(p,yc, point, rrotation); + } + + if (yc > cylinderRadius) + { + // Flat surface + return getFromColor(p); + } + + float hitAngle = (acos(yc / cylinderRadius) + cylinderAngle) - PI; + + float hitAngleMod = mod(hitAngle, 2.0 * PI); + if ((hitAngleMod > PI && amount < 0.5) || (hitAngleMod > PI/2.0 && amount < 0.0)) + { + return seeThrough(yc, p, rotation, rrotation); + } + + point = hitPoint(hitAngle, yc, point, rrotation); + + if (point.x < 0.0 || point.y < 0.0 || point.x > 1.0 || point.y > 1.0) + { + return seeThroughWithShadow(yc, p, point, rotation, rrotation); + } + + vec4 color = backside(yc, point); + + vec4 otherColor; + if (yc < 0.0) + { + float shado = 1.0 - (sqrt(pow(point.x - 0.5, 2.0) + pow(point.y - 0.5, 2.0)) / 0.71); + shado *= pow(-yc / cylinderRadius, 3.0); + shado *= 0.5; + otherColor = vec4(0.0, 0.0, 0.0, shado); + } + else + { + otherColor = 
getFromColor(p); + } + + color = antiAlias(color, otherColor, cylinderRadius - abs(yc)); + + vec4 cl = seeThroughWithShadow(yc, p, point, rotation, rrotation); + float dist = distanceToEdge(point); + + return antiAlias(color, cl, dist); +} diff --git a/ffmpeg/transitions/glsl/LeftRight.glsl b/ffmpeg/transitions/glsl/LeftRight.glsl new file mode 100644 index 0000000..3814dd8 --- /dev/null +++ b/ffmpeg/transitions/glsl/LeftRight.glsl @@ -0,0 +1,25 @@ +// Author:zhmy +// License: MIT + +const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); +const vec2 boundMin = vec2(0.0, 0.0); +const vec2 boundMax = vec2(1.0, 1.0); + +bool inBounds (vec2 p) { + return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); +} + +vec4 transition (vec2 uv) { + vec2 spfr,spto = vec2(-1.); + + float size = mix(1.0, 3.0, progress*0.2); + spto = (uv + vec2(-0.5,-0.5))*vec2(size,size)+vec2(0.5,0.5); + spfr = (uv - vec2(1.-progress, 0.0)); + if(inBounds(spfr)){ + return getToColor(spfr); + }else if(inBounds(spto)){ + return getFromColor(spto) * (1.0 - progress); + } else{ + return black; + } +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/LinearBlur.glsl b/ffmpeg/transitions/glsl/LinearBlur.glsl new file mode 100644 index 0000000..e32187b --- /dev/null +++ b/ffmpeg/transitions/glsl/LinearBlur.glsl @@ -0,0 +1,26 @@ +// author: gre
// license: MIT +uniform float intensity; // = 0.1 +const int passes = 6; + +vec4 transition(vec2 uv) { + vec4 c1 = vec4(0.0); + vec4 c2 = vec4(0.0); + + float disp = intensity*(0.5-distance(0.5, progress)); + for (int xi=0; xi<passes; xi++) + { + float x = float(xi) / float(passes) - 0.5; + for (int yi=0; yi<passes; yi++) + { + float y = float(yi) / float(passes) - 0.5; + vec2 v = vec2(x,y); + float d = disp; + c1 += getFromColor( uv + d*v); + c2 += getToColor( uv + d*v); + } + } + c1 /= float(passes*passes); + c2 /= float(passes*passes); + return mix(c1, c2, progress); +} diff --git a/ffmpeg/transitions/glsl/Mosaic.glsl b/ffmpeg/transitions/glsl/Mosaic.glsl new file mode 100644 --- /dev/null +++ b/ffmpeg/transitions/glsl/Mosaic.glsl @@ -0,0 +1,43 @@ +// License: MIT +// Author: Xaychru +// ported by gre from https://gist.github.com/Xaychru/130bb7b7affedbda9df5 + +uniform int endx; // = 2 +uniform int endy; // = -1 + +#define PI 3.14159265358979323 +#define POW2(X) X*X +#define POW3(X) X*X*X + +float Rand(vec2 v) { + return fract(sin(dot(v.xy ,vec2(12.9898,78.233))) * 43758.5453); +} +vec2 Rotate(vec2 v, float a) { + mat2 rm = mat2(cos(a), -sin(a), + sin(a), cos(a)); + return rm*v; +} +float CosInterpolation(float x) { + return -cos(x*PI)/2.+.5; +} +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy - .5; + vec2 rp = p; + float rpr = (progress*2.-1.); + float z = -(rpr*rpr*2.) + 3.; + float az = abs(z); + rp *= az; + rp += mix(vec2(.5, .5), vec2(float(endx) + .5, float(endy) + .5), POW2(CosInterpolation(progress))); + vec2 mrp = mod(rp, 1.); + vec2 crp = rp; + bool onEnd = int(floor(crp.x))==endx&&int(floor(crp.y))==endy; + if(!onEnd) { + float ang = float(int(Rand(floor(crp))*4.))*.5*PI; + mrp = vec2(.5) + Rotate(mrp-vec2(.5), ang); + } + if(onEnd || Rand(floor(crp))>.5) { + return getToColor(mrp); + } else { + return getFromColor(mrp); + } +} diff --git a/ffmpeg/transitions/glsl/PolkaDotsCurtain.glsl b/ffmpeg/transitions/glsl/PolkaDotsCurtain.glsl new file mode 100644 index 0000000..6ceb840 --- /dev/null +++ b/ffmpeg/transitions/glsl/PolkaDotsCurtain.glsl @@ -0,0 +1,10 @@ +// author: bobylito +// license: MIT +const float SQRT_2 = 1.414213562373; +uniform float dots;// = 20.0; +uniform vec2 center;// = vec2(0, 0); + +vec4 transition(vec2 uv) { + bool nextImage = distance(fract(uv * dots), vec2(0.5, 0.5)) < ( progress / distance(uv, center)); + return nextImage ?
getToColor(uv) : getFromColor(uv); +} diff --git a/ffmpeg/transitions/glsl/Radial.glsl b/ffmpeg/transitions/glsl/Radial.glsl new file mode 100644 index 0000000..5ded8a2 --- /dev/null +++ b/ffmpeg/transitions/glsl/Radial.glsl @@ -0,0 +1,16 @@ +// License: MIT +// Author: Xaychru +// ported by gre from https://gist.github.com/Xaychru/ce1d48f0ce00bb379750 + +uniform float smoothness; // = 1.0 + +const float PI = 3.141592653589; + +vec4 transition(vec2 p) { + vec2 rp = p*2.-1.; + return mix( + getToColor(p), + getFromColor(p), + smoothstep(0., smoothness, atan(rp.y,rp.x) - (progress-.5) * PI * 2.5) + ); +} diff --git a/ffmpeg/transitions/glsl/SimpleZoom.glsl b/ffmpeg/transitions/glsl/SimpleZoom.glsl new file mode 100644 index 0000000..d6a886b --- /dev/null +++ b/ffmpeg/transitions/glsl/SimpleZoom.glsl @@ -0,0 +1,17 @@ +// Author: 0gust1 +// License: MIT + +uniform float zoom_quickness; // = 0.8 +float nQuick = clamp(zoom_quickness,0.2,1.0); + +vec2 zoom(vec2 uv, float amount) { + return 0.5 + ((uv - 0.5) * (1.0-amount)); +} + +vec4 transition (vec2 uv) { + return mix( + getFromColor(zoom(uv, smoothstep(0.0, nQuick, progress))), + getToColor(uv), + smoothstep(nQuick-0.2, 1.0, progress) + ); +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/StereoViewer.glsl b/ffmpeg/transitions/glsl/StereoViewer.glsl new file mode 100644 index 0000000..4d4be4d --- /dev/null +++ b/ffmpeg/transitions/glsl/StereoViewer.glsl @@ -0,0 +1,210 @@ +// Tunable parameters +// How much to zoom (out) for the effect ~ 0.5 - 1.0 +uniform float zoom; // = 0.88 +// Corner radius as a fraction of the image height +uniform float corner_radius; // = 0.22 + +// author: Ted Schundler +// license: BSD 2 Clause +// Free for use and modification by anyone with credit + +// Copyright (c) 2016, Theodore K Schundler +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/////////////////////////////////////////////////////////////////////////////// +// Stereo Viewer Toy Transition // +// // +// Inspired by ViewMaster / Image3D image viewer devices. // +// This effect is similar to what you see when you press the device's lever. 
// +// There is a quick zoom in / out to make the transition 'valid' for GLSL.io // +/////////////////////////////////////////////////////////////////////////////// + +const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); +const vec2 c00 = vec2(0.0, 0.0); // the four corner points +const vec2 c01 = vec2(0.0, 1.0); +const vec2 c11 = vec2(1.0, 1.0); +const vec2 c10 = vec2(1.0, 0.0); + +// Check if a point is within a given corner +bool in_corner(vec2 p, vec2 corner, vec2 radius) { + // determine the direction we want to be filled + vec2 axis = (c11 - corner) - corner; + + // warp the point so we are always testing the bottom left point with the + // circle centered on the origin + p = p - (corner + axis * radius); + p *= axis / radius; + return (p.x > 0.0 && p.y > -1.0) || (p.y > 0.0 && p.x > -1.0) || dot(p, p) < 1.0; +} + +// Check all four corners +// return a float for v2 for anti-aliasing? +bool test_rounded_mask(vec2 p, vec2 corner_size) { + return + in_corner(p, c00, corner_size) && + in_corner(p, c01, corner_size) && + in_corner(p, c10, corner_size) && + in_corner(p, c11, corner_size); +} + +// Screen blend mode - https://en.wikipedia.org/wiki/Blend_modes +// This more closely approximates what you see than linear blending +vec4 screen(vec4 a, vec4 b) { + return 1.0 - (1.0 - a) * (1.0 -b); +} + +// Given RGBA, find a value that when screened with itself +// will yield the original value. +vec4 unscreen(vec4 c) { + return 1.0 - sqrt(1.0 - c); +} + +// Grab a pixel, only if it isn't masked out by the rounded corners +vec4 sample_with_corners_from(vec2 p, vec2 corner_size) { + p = (p - 0.5) / zoom + 0.5; + if (!test_rounded_mask(p, corner_size)) { + return black; + } + return unscreen(getFromColor(p)); +} + +vec4 sample_with_corners_to(vec2 p, vec2 corner_size) { + p = (p - 0.5) / zoom + 0.5; + if (!test_rounded_mask(p, corner_size)) { + return black; + } + return unscreen(getToColor(p)); +} + +// special sampling used when zooming - extra zoom parameter and don't unscreen +vec4 simple_sample_with_corners_from(vec2 p, vec2 corner_size, float zoom_amt) { + p = (p - 0.5) / (1.0 - zoom_amt + zoom * zoom_amt) + 0.5; + if (!test_rounded_mask(p, corner_size)) { + return black; + } + return getFromColor(p); +} + +vec4 simple_sample_with_corners_to(vec2 p, vec2 corner_size, float zoom_amt) { + p = (p - 0.5) / (1.0 - zoom_amt + zoom * zoom_amt) + 0.5; + if (!test_rounded_mask(p, corner_size)) { + return black; + } + return getToColor(p); +} + +// Basic 2D affine transform matrix helpers +// These really shouldn't be used in a fragment shader - I should work out the +// the math for a translate & rotate function as a pair of dot products instead + +mat3 rotate2d(float angle, float ratio) { + float s = sin(angle); + float c = cos(angle); + return mat3( + c, s ,0.0, + -s, c, 0.0, + 0.0, 0.0, 1.0); +} + +mat3 translate2d(float x, float y) { + return mat3( + 1.0, 0.0, 0, + 0.0, 1.0, 0, + -x, -y, 1.0); +} + +mat3 scale2d(float x, float y) { + return mat3( + x, 0.0, 0, + 0.0, y, 0, + 0, 0, 1.0); +} + +// Split an image and rotate one up and one down along off screen pivot points +vec4 get_cross_rotated(vec3 p3, float angle, vec2 corner_size, float ratio) { + angle = angle * angle; // easing + angle /= 2.4; // works out to be a good number of radians + + mat3 center_and_scale = translate2d(-0.5, -0.5) * scale2d(1.0, ratio); + mat3 unscale_and_uncenter = scale2d(1.0, 1.0/ratio) * translate2d(0.5,0.5); + mat3 slide_left = translate2d(-2.0,0.0); + mat3 slide_right = translate2d(2.0,0.0); + mat3 rotate = 
rotate2d(angle, ratio); + + mat3 op_a = center_and_scale * slide_right * rotate * slide_left * unscale_and_uncenter; + mat3 op_b = center_and_scale * slide_left * rotate * slide_right * unscale_and_uncenter; + + vec4 a = sample_with_corners_from((op_a * p3).xy, corner_size); + vec4 b = sample_with_corners_from((op_b * p3).xy, corner_size); + + return screen(a, b); +} + +// Image stays put, but this time move two masks +vec4 get_cross_masked(vec3 p3, float angle, vec2 corner_size, float ratio) { + angle = 1.0 - angle; + angle = angle * angle; // easing + angle /= 2.4; + + vec4 img; + + mat3 center_and_scale = translate2d(-0.5, -0.5) * scale2d(1.0, ratio); + mat3 unscale_and_uncenter = scale2d(1.0 / zoom, 1.0 / (zoom * ratio)) * translate2d(0.5,0.5); + mat3 slide_left = translate2d(-2.0,0.0); + mat3 slide_right = translate2d(2.0,0.0); + mat3 rotate = rotate2d(angle, ratio); + + mat3 op_a = center_and_scale * slide_right * rotate * slide_left * unscale_and_uncenter; + mat3 op_b = center_and_scale * slide_left * rotate * slide_right * unscale_and_uncenter; + + bool mask_a = test_rounded_mask((op_a * p3).xy, corner_size); + bool mask_b = test_rounded_mask((op_b * p3).xy, corner_size); + + if (mask_a || mask_b) { + img = sample_with_corners_to(p3.xy, corner_size); + return screen(mask_a ? img : black, mask_b ? img : black); + } else { + return black; + } +} + +vec4 transition(vec2 uv) { + float a; + vec2 p=uv.xy/vec2(1.0).xy; + vec3 p3 = vec3(p.xy, 1.0); // for 2D matrix transforms + + // corner is warped to represent to size after mapping to 1.0, 1.0 + vec2 corner_size = vec2(corner_radius / ratio, corner_radius); + + if (progress <= 0.0) { + // 0.0: start with the base frame always + return getFromColor(p); + } else if (progress < 0.1) { + // 0.0-0.1: zoom out and add rounded corners + a = progress / 0.1; + return simple_sample_with_corners_from(p, corner_size * a, a); + } else if (progress < 0.48) { + // 0.1-0.48: Split original image apart + a = (progress - 0.1)/0.38; + return get_cross_rotated(p3, a, corner_size, ratio); + } else if (progress < 0.9) { + // 0.48-0.52: black + // 0.52 - 0.9: unmask new image + return get_cross_masked(p3, (progress - 0.52)/0.38, corner_size, ratio); + } else if (progress < 1.0) { + // zoom out and add rounded corners + a = (1.0 - progress) / 0.1; + return simple_sample_with_corners_to(p, corner_size * a, a); + } else { + // 1.0 end with base frame + return getToColor(p); + } +} diff --git a/ffmpeg/transitions/glsl/Swirl.glsl b/ffmpeg/transitions/glsl/Swirl.glsl new file mode 100644 index 0000000..b086066 --- /dev/null +++ b/ffmpeg/transitions/glsl/Swirl.glsl @@ -0,0 +1,31 @@ +// License: MIT +// Author: Sergey Kosarevsky +// ( http://www.linderdaum.com ) +// ported by gre from https://gist.github.com/corporateshark/cacfedb8cca0f5ce3f7c + +vec4 transition(vec2 UV) +{ + float Radius = 1.0; + + float T = progress; + + UV -= vec2( 0.5, 0.5 ); + + float Dist = length(UV); + + if ( Dist < Radius ) + { + float Percent = (Radius - Dist) / Radius; + float A = ( T <= 0.5 ) ? 
mix( 0.0, 1.0, T/0.5 ) : mix( 1.0, 0.0, (T-0.5)/0.5 ); + float Theta = Percent * Percent * A * 8.0 * 3.14159; + float S = sin( Theta ); + float C = cos( Theta ); + UV = vec2( dot(UV, vec2(C, -S)), dot(UV, vec2(S, C)) ); + } + UV += vec2( 0.5, 0.5 ); + + vec4 C0 = getFromColor(UV); + vec4 C1 = getToColor(UV); + + return mix( C0, C1, T ); +} diff --git a/ffmpeg/transitions/glsl/TVStatic.glsl b/ffmpeg/transitions/glsl/TVStatic.glsl new file mode 100644 index 0000000..66c2e86 --- /dev/null +++ b/ffmpeg/transitions/glsl/TVStatic.glsl @@ -0,0 +1,25 @@ +// author: Brandon Anzaldi +// license: MIT +uniform float offset; // = 0.05 + +// Pseudo-random noise function +// http://byteblacksmith.com/improvements-to-the-canonical-one-liner-glsl-rand-for-opengl-es-2-0/ +highp float noise(vec2 co) +{ + highp float a = 12.9898; + highp float b = 78.233; + highp float c = 43758.5453; + highp float dt= dot(co.xy * progress, vec2(a, b)); + highp float sn= mod(dt,3.14); + return fract(sin(sn) * c); +} + +vec4 transition(vec2 p) { + if (progress < offset) { + return getFromColor(p); + } else if (progress > (1.0 - offset)) { + return getToColor(p); + } else { + return vec4(vec3(noise(p)), 1.0); + } +} diff --git a/ffmpeg/transitions/glsl/TopBottom.glsl b/ffmpeg/transitions/glsl/TopBottom.glsl new file mode 100644 index 0000000..6370e07 --- /dev/null +++ b/ffmpeg/transitions/glsl/TopBottom.glsl @@ -0,0 +1,24 @@ +// Author:zhmy +// License: MIT + +const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); +const vec2 boundMin = vec2(0.0, 0.0); +const vec2 boundMax = vec2(1.0, 1.0); + +bool inBounds (vec2 p) { + return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); +} + +vec4 transition (vec2 uv) { + vec2 spfr,spto = vec2(-1.); + float size = mix(1.0, 3.0, progress*0.2); + spto = (uv + vec2(-0.5,-0.5))*vec2(size,size)+vec2(0.5,0.5); + spfr = (uv + vec2(0.0, 1.0 - progress)); + if(inBounds(spfr)){ + return getToColor(spfr); + } else if(inBounds(spto)){ + return getFromColor(spto) * (1.0 - progress); + } else{ + return black; + } +} \ No newline at end of file diff --git a/ffmpeg/transitions/glsl/WaterDrop.glsl b/ffmpeg/transitions/glsl/WaterDrop.glsl new file mode 100644 index 0000000..e3bdc43 --- /dev/null +++ b/ffmpeg/transitions/glsl/WaterDrop.glsl @@ -0,0 +1,16 @@ +// author: Paweł Płóciennik +// license: MIT +uniform float amplitude; // = 30 +uniform float speed; // = 30 + +vec4 transition(vec2 p) { + vec2 dir = p - vec2(.5); + float dist = length(dir); + + if (dist > progress) { + return mix(getFromColor( p), getToColor( p), progress); + } else { + vec2 offset = dir * sin(dist * amplitude - progress * speed); + return mix(getFromColor( p + offset), getToColor( p), progress); + } +} diff --git a/ffmpeg/transitions/glsl/ZoomInCircles.glsl b/ffmpeg/transitions/glsl/ZoomInCircles.glsl new file mode 100644 index 0000000..b647767 --- /dev/null +++ b/ffmpeg/transitions/glsl/ZoomInCircles.glsl @@ -0,0 +1,38 @@ +// License: MIT +// Author: dycm8009 +// ported by gre from https://gist.github.com/dycm8009/948e99b1800e81ad909a + +vec2 zoom(vec2 uv, float amount) { + return 0.5 + ((uv - 0.5) * amount); +} + +vec2 ratio2 = vec2(1.0, 1.0 / ratio); + +vec4 transition(vec2 uv) { + // TODO: some timing are hardcoded but should be one or many parameters + // TODO: should also be able to configure how much circles + // TODO: if() branching should be avoided when possible, prefer use of step() & other functions + vec2 r = 2.0 * ((vec2(uv.xy) - 0.5) * ratio2); + float pro = progress / 0.8; + float z = pro * 0.2; + float t = 0.0; + if 
(pro > 1.0) { + z = 0.2 + (pro - 1.0) * 5.; + t = clamp((progress - 0.8) / 0.07, 0.0, 1.0); + } + if (length(r) < 0.5+z) { + // uv = zoom(uv, 0.9 - 0.1 * pro); + } + else if (length(r) < 0.8+z*1.5) { + uv = zoom(uv, 1.0 - 0.15 * pro); + t = t * 0.5; + } + else if (length(r) < 1.2+z*2.5) { + uv = zoom(uv, 1.0 - 0.2 * pro); + t = t * 0.2; + } + else { + uv = zoom(uv, 1.0 - 0.25 * pro); + } + return mix(getFromColor(uv), getToColor(uv), t); +} diff --git a/ffmpeg/transitions/glsl/angular.glsl b/ffmpeg/transitions/glsl/angular.glsl new file mode 100644 index 0000000..453f2c4 --- /dev/null +++ b/ffmpeg/transitions/glsl/angular.glsl @@ -0,0 +1,21 @@ +// Author: Fernando Kuteken +// License: MIT + +#define PI 3.141592653589 + +uniform float startingAngle; // = 90; + +vec4 transition (vec2 uv) { + + float offset = startingAngle * PI / 180.0; + float angle = atan(uv.y - 0.5, uv.x - 0.5) + offset; + float normalizedAngle = (angle + PI) / (2.0 * PI); + + normalizedAngle = normalizedAngle - floor(normalizedAngle); + + return mix( + getFromColor(uv), + getToColor(uv), + step(normalizedAngle, progress) + ); +} diff --git a/ffmpeg/transitions/glsl/burn.glsl b/ffmpeg/transitions/glsl/burn.glsl new file mode 100644 index 0000000..08cdb59 --- /dev/null +++ b/ffmpeg/transitions/glsl/burn.glsl @@ -0,0 +1,10 @@ +// author: gre +// License: MIT +uniform vec3 color /* = vec3(0.9, 0.4, 0.2) */; +vec4 transition (vec2 uv) { + return mix( + getFromColor(uv) + vec4(progress*color, 1.0), + getToColor(uv) + vec4((1.0-progress)*color, 1.0), + progress + ); +} diff --git a/ffmpeg/transitions/glsl/cannabisleaf.glsl b/ffmpeg/transitions/glsl/cannabisleaf.glsl new file mode 100644 index 0000000..097c010 --- /dev/null +++ b/ffmpeg/transitions/glsl/cannabisleaf.glsl @@ -0,0 +1,15 @@ +// Author: @Flexi23 +// License: MIT + +// inspired by http://www.wolframalpha.com/input/?i=cannabis+curve + +vec4 transition (vec2 uv) { + if(progress == 0.0){ + return getFromColor(uv); + } + vec2 leaf_uv = (uv - vec2(0.5))/10./pow(progress,3.5); + leaf_uv.y += 0.35; + float r = 0.18; + float o = atan(leaf_uv.y, leaf_uv.x); + return mix(getFromColor(uv), getToColor(uv), 1.-step(1. - length(leaf_uv)+r*(1.+sin(o))*(1.+0.9 * cos(8.*o))*(1.+0.1*cos(24.*o))*(0.9+0.05*cos(200.*o)), 1.)); +} diff --git a/ffmpeg/transitions/glsl/circle.glsl b/ffmpeg/transitions/glsl/circle.glsl new file mode 100644 index 0000000..c8d6093 --- /dev/null +++ b/ffmpeg/transitions/glsl/circle.glsl @@ -0,0 +1,19 @@ +// Author: Fernando Kuteken +// License: MIT + +uniform vec2 center; // = vec2(0.5, 0.5); +uniform vec3 backColor; // = vec3(0.1, 0.1, 0.1); + +vec4 transition (vec2 uv) { + + float distance = length(uv - center); + float radius = sqrt(8.0) * abs(progress - 0.5); + + if (distance > radius) { + return vec4(backColor, 1.0); + } + else { + if (progress < 0.5) return getFromColor(uv); + else return getToColor(uv); + } +} diff --git a/ffmpeg/transitions/glsl/circleopen.glsl b/ffmpeg/transitions/glsl/circleopen.glsl new file mode 100644 index 0000000..d4a4368 --- /dev/null +++ b/ffmpeg/transitions/glsl/circleopen.glsl @@ -0,0 +1,13 @@ +// author: gre +// License: MIT +uniform float smoothness; // = 0.3 +uniform bool opening; // = true + +const vec2 center = vec2(0.5, 0.5); +const float SQRT_2 = 1.414213562373; + +vec4 transition (vec2 uv) { + float x = opening ? progress : 1.-progress; + float m = smoothstep(-smoothness, 0.0, SQRT_2*distance(center, uv) - x*(1.+smoothness)); + return mix(getFromColor(uv), getToColor(uv), opening ? 
1.-m : m); +} diff --git a/ffmpeg/transitions/glsl/colorphase.glsl b/ffmpeg/transitions/glsl/colorphase.glsl new file mode 100644 index 0000000..2284e36 --- /dev/null +++ b/ffmpeg/transitions/glsl/colorphase.glsl @@ -0,0 +1,14 @@ +// Author: gre +// License: MIT + +// Usage: fromStep and toStep must be in [0.0, 1.0] range +// and all(fromStep) must be < all(toStep) + +uniform vec4 fromStep; // = vec4(0.0, 0.2, 0.4, 0.0) +uniform vec4 toStep; // = vec4(0.6, 0.8, 1.0, 1.0) + +vec4 transition (vec2 uv) { + vec4 a = getFromColor(uv); + vec4 b = getToColor(uv); + return mix(a, b, smoothstep(fromStep, toStep, vec4(progress))); +} diff --git a/ffmpeg/transitions/glsl/crosshatch.glsl b/ffmpeg/transitions/glsl/crosshatch.glsl new file mode 100644 index 0000000..a98991d --- /dev/null +++ b/ffmpeg/transitions/glsl/crosshatch.glsl @@ -0,0 +1,16 @@ +// License: MIT +// Author: pthrasher +// adapted by gre from https://gist.github.com/pthrasher/04fd9a7de4012cbb03f6 + +uniform vec2 center; // = vec2(0.5) +uniform float threshold; // = 3.0 +uniform float fadeEdge; // = 0.1 + +float rand(vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} +vec4 transition(vec2 p) { + float dist = distance(center, p) / threshold; + float r = progress - min(rand(vec2(p.y, 0.0)), rand(vec2(0.0, p.x))); + return mix(getFromColor(p), getToColor(p), mix(0.0, mix(step(dist, r), 1.0, smoothstep(1.0-fadeEdge, 1.0, progress)), smoothstep(0.0, fadeEdge, progress))); +} diff --git a/ffmpeg/transitions/glsl/crosswarp.glsl b/ffmpeg/transitions/glsl/crosswarp.glsl new file mode 100644 index 0000000..524df87 --- /dev/null +++ b/ffmpeg/transitions/glsl/crosswarp.glsl @@ -0,0 +1,7 @@ +// Author: Eke Péter +// License: MIT +vec4 transition(vec2 p) { + float x = progress; + x=smoothstep(.0,1.0,(x*2.0+p.x-1.0)); + return mix(getFromColor((p-.5)*(1.-x)+.5), getToColor((p-.5)*x+.5), x); +} diff --git a/ffmpeg/transitions/glsl/cube.glsl b/ffmpeg/transitions/glsl/cube.glsl new file mode 100644 index 0000000..e049ccf --- /dev/null +++ b/ffmpeg/transitions/glsl/cube.glsl @@ -0,0 +1,66 @@ +// Author: gre +// License: MIT +uniform float persp; // = 0.7 +uniform float unzoom; // = 0.3 +uniform float reflection; // = 0.4 +uniform float floating; // = 3.0 + +vec2 project (vec2 p) { + return p * vec2(1.0, -1.2) + vec2(0.0, -floating/100.); +} + +bool inBounds (vec2 p) { + return all(lessThan(vec2(0.0), p)) && all(lessThan(p, vec2(1.0))); +} + +vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) { + vec4 c = vec4(0.0, 0.0, 0.0, 1.0); + pfr = project(pfr); + // FIXME avoid branching might help perf! + if (inBounds(pfr)) { + c += mix(vec4(0.0), getFromColor(pfr), reflection * mix(1.0, 0.0, pfr.y)); + } + pto = project(pto); + if (inBounds(pto)) { + c += mix(vec4(0.0), getToColor(pto), reflection * mix(1.0, 0.0, pto.y)); + } + return c; +} + +// p : the position +// persp : the perspective in [ 0, 1 ] +// center : the xcenter in [0, 1] \ 0.5 excluded +vec2 xskew (vec2 p, float persp, float center) { + float x = mix(p.x, 1.0-p.x, center); + return ( + ( + vec2( x, (p.y - 0.5*(1.0-persp) * x) / (1.0+(persp-1.0)*x) ) + - vec2(0.5-distance(center, 0.5), 0.0) + ) + * vec2(0.5 / distance(center, 0.5) * (center<0.5 ? 1.0 : -1.0), 1.0) + + vec2(center<0.5 ? 
0.0 : 1.0, 0.0) + ); +} + +vec4 transition(vec2 op) { + float uz = unzoom * 2.0*(0.5-distance(0.5, progress)); + vec2 p = -uz*0.5+(1.0+uz) * op; + vec2 fromP = xskew( + (p - vec2(progress, 0.0)) / vec2(1.0-progress, 1.0), + 1.0-mix(progress, 0.0, persp), + 0.0 + ); + vec2 toP = xskew( + p / vec2(progress, 1.0), + mix(pow(progress, 2.0), 1.0, persp), + 1.0 + ); + // FIXME avoid branching might help perf! + if (inBounds(fromP)) { + return getFromColor(fromP); + } + else if (inBounds(toP)) { + return getToColor(toP); + } + return bgColor(op, fromP, toP); +} diff --git a/ffmpeg/transitions/glsl/directional-easing.glsl b/ffmpeg/transitions/glsl/directional-easing.glsl new file mode 100644 index 0000000..12c0dbb --- /dev/null +++ b/ffmpeg/transitions/glsl/directional-easing.glsl @@ -0,0 +1,15 @@ +// Author: Max Plotnikov +// License: MIT + +uniform vec2 direction; // = vec2(0.0, 1.0) + +vec4 transition (vec2 uv) { + float easing = sqrt((2.0 - progress) * progress); + vec2 p = uv + easing * sign(direction); + vec2 f = fract(p); + return mix( + getToColor(f), + getFromColor(f), + step(0.0, p.y) * step(p.y, 1.0) * step(0.0, p.x) * step(p.x, 1.0) + ); +} diff --git a/ffmpeg/transitions/glsl/directionalwarp.glsl b/ffmpeg/transitions/glsl/directionalwarp.glsl new file mode 100644 index 0000000..dbb5016 --- /dev/null +++ b/ffmpeg/transitions/glsl/directionalwarp.glsl @@ -0,0 +1,15 @@ +// Author: pschroen +// License: MIT + +uniform vec2 direction; // = vec2(-1.0, 1.0) + +const float smoothness = 0.5; +const vec2 center = vec2(0.5, 0.5); + +vec4 transition (vec2 uv) { + vec2 v = normalize(direction); + v /= abs(v.x) + abs(v.y); + float d = v.x * center.x + v.y * center.y; + float m = 1.0 - smoothstep(-smoothness, 0.0, v.x * uv.x + v.y * uv.y - (d - 0.5 + progress * (1.0 + smoothness))); + return mix(getFromColor((uv - 0.5) * (1.0 - m) + 0.5), getToColor((uv - 0.5) * m + 0.5), m); +} diff --git a/ffmpeg/transitions/glsl/directionalwipe.glsl b/ffmpeg/transitions/glsl/directionalwipe.glsl new file mode 100644 index 0000000..03d2e8b --- /dev/null +++ b/ffmpeg/transitions/glsl/directionalwipe.glsl @@ -0,0 +1,17 @@ +// Author: gre +// License: MIT + +uniform vec2 direction; // = vec2(1.0, -1.0) +uniform float smoothness; // = 0.5 + +const vec2 center = vec2(0.5, 0.5); + +vec4 transition (vec2 uv) { + vec2 v = normalize(direction); + v /= abs(v.x)+abs(v.y); + float d = v.x * center.x + v.y * center.y; + float m = + (1.0-step(progress, 0.0)) * // there is something wrong with our formula that makes m not equals 0.0 with progress is 0.0 + (1.0 - smoothstep(-smoothness, 0.0, v.x * uv.x + v.y * uv.y - (d-0.5+progress*(1.+smoothness)))); + return mix(getFromColor(uv), getToColor(uv), m); +} diff --git a/ffmpeg/transitions/glsl/displacement.glsl b/ffmpeg/transitions/glsl/displacement.glsl new file mode 100644 index 0000000..2ef57f0 --- /dev/null +++ b/ffmpeg/transitions/glsl/displacement.glsl @@ -0,0 +1,22 @@ +// Author: Travis Fischer +// License: MIT +// +// Adapted from a Codrops article by Robin Delaporte +// https://tympanus.net/Development/DistortionHoverEffect + +uniform sampler2D displacementMap; + +uniform float strength; // = 0.5 + +vec4 transition (vec2 uv) { + float displacement = texture2D(displacementMap, uv).r * strength; + + vec2 uvFrom = vec2(uv.x + progress * displacement, uv.y); + vec2 uvTo = vec2(uv.x - (1.0 - progress) * displacement, uv.y); + + return mix( + getFromColor(uvFrom), + getToColor(uvTo), + progress + ); +} diff --git a/ffmpeg/transitions/glsl/doorway.glsl 
b/ffmpeg/transitions/glsl/doorway.glsl new file mode 100644 index 0000000..737ca6a --- /dev/null +++ b/ffmpeg/transitions/glsl/doorway.glsl @@ -0,0 +1,50 @@ +// author: gre +// License: MIT +uniform float reflection; // = 0.4 +uniform float perspective; // = 0.4 +uniform float depth; // = 3 + +const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); +const vec2 boundMin = vec2(0.0, 0.0); +const vec2 boundMax = vec2(1.0, 1.0); + +bool inBounds (vec2 p) { + return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); +} + +vec2 project (vec2 p) { + return p * vec2(1.0, -1.2) + vec2(0.0, -0.02); +} + +vec4 bgColor (vec2 p, vec2 pto) { + vec4 c = black; + pto = project(pto); + if (inBounds(pto)) { + c += mix(black, getToColor(pto), reflection * mix(1.0, 0.0, pto.y)); + } + return c; +} + + +vec4 transition (vec2 p) { + vec2 pfr = vec2(-1.), pto = vec2(-1.); + float middleSlit = 2.0 * abs(p.x-0.5) - progress; + if (middleSlit > 0.0) { + pfr = p + (p.x > 0.5 ? -1.0 : 1.0) * vec2(0.5*progress, 0.0); + float d = 1.0/(1.0+perspective*progress*(1.0-middleSlit)); + pfr.y -= d/2.; + pfr.y *= d; + pfr.y += d/2.; + } + float size = mix(1.0, depth, 1.-progress); + pto = (p + vec2(-0.5, -0.5)) * vec2(size, size) + vec2(0.5, 0.5); + if (inBounds(pfr)) { + return getFromColor(pfr); + } + else if (inBounds(pto)) { + return getToColor(pto); + } + else { + return bgColor(p, pto); + } +} diff --git a/ffmpeg/transitions/glsl/fade.glsl b/ffmpeg/transitions/glsl/fade.glsl new file mode 100644 index 0000000..edcd56c --- /dev/null +++ b/ffmpeg/transitions/glsl/fade.glsl @@ -0,0 +1,10 @@ +// author: gre +// license: MIT + +vec4 transition (vec2 uv) { + return mix( + getFromColor(uv), + getToColor(uv), + progress + ); +} diff --git a/ffmpeg/transitions/glsl/fadecolor.glsl b/ffmpeg/transitions/glsl/fadecolor.glsl new file mode 100644 index 0000000..dd54e45 --- /dev/null +++ b/ffmpeg/transitions/glsl/fadecolor.glsl @@ -0,0 +1,10 @@ +// author: gre +// License: MIT +uniform vec3 color;// = vec3(0.0) +uniform float colorPhase; // = 0.4 ; // if 0.0, there is no black phase, if 0.9, the black phase is very important +vec4 transition (vec2 uv) { + return mix( + mix(vec4(color, 1.0), getFromColor(uv), smoothstep(1.0-colorPhase, 0.0, progress)), + mix(vec4(color, 1.0), getToColor(uv), smoothstep( colorPhase, 1.0, progress)), + progress); +} diff --git a/ffmpeg/transitions/glsl/fadegrayscale.glsl b/ffmpeg/transitions/glsl/fadegrayscale.glsl new file mode 100644 index 0000000..081d2c0 --- /dev/null +++ b/ffmpeg/transitions/glsl/fadegrayscale.glsl @@ -0,0 +1,17 @@ +// Author: gre +// License: MIT + +uniform float intensity; // = 0.3; // if 0.0, the image directly turn grayscale, if 0.9, the grayscale transition phase is very important + +vec3 grayscale (vec3 color) { + return vec3(0.2126*color.r + 0.7152*color.g + 0.0722*color.b); +} + +vec4 transition (vec2 uv) { + vec4 fc = getFromColor(uv); + vec4 tc = getToColor(uv); + return mix( + mix(vec4(grayscale(fc.rgb), 1.0), fc, smoothstep(1.0-intensity, 0.0, progress)), + mix(vec4(grayscale(tc.rgb), 1.0), tc, smoothstep( intensity, 1.0, progress)), + progress); +} diff --git a/ffmpeg/transitions/glsl/flyeye.glsl b/ffmpeg/transitions/glsl/flyeye.glsl new file mode 100644 index 0000000..0482bdb --- /dev/null +++ b/ffmpeg/transitions/glsl/flyeye.glsl @@ -0,0 +1,17 @@ +// Author: gre +// License: MIT +uniform float size; // = 0.04 +uniform float zoom; // = 50.0 +uniform float colorSeparation; // = 0.3 + +vec4 transition(vec2 p) { + float inv = 1. 
- progress; + vec2 disp = size*vec2(cos(zoom*p.x), sin(zoom*p.y)); + vec4 texTo = getToColor(p + inv*disp); + vec4 texFrom = vec4( + getFromColor(p + progress*disp*(1.0 - colorSeparation)).r, + getFromColor(p + progress*disp).g, + getFromColor(p + progress*disp*(1.0 + colorSeparation)).b, + 1.0); + return texTo*progress + texFrom*inv; +} diff --git a/ffmpeg/transitions/glsl/heart.glsl b/ffmpeg/transitions/glsl/heart.glsl new file mode 100644 index 0000000..914ce4a --- /dev/null +++ b/ffmpeg/transitions/glsl/heart.glsl @@ -0,0 +1,16 @@ +// Author: gre +// License: MIT + +float inHeart (vec2 p, vec2 center, float size) { + if (size==0.0) return 0.0; + vec2 o = (p-center)/(1.6*size); + float a = o.x*o.x+o.y*o.y-0.3; + return step(a*a*a, o.x*o.x*o.y*o.y*o.y); +} +vec4 transition (vec2 uv) { + return mix( + getFromColor(uv), + getToColor(uv), + inHeart(uv, vec2(0.5, 0.4), progress) + ); +} diff --git a/ffmpeg/transitions/glsl/hexagonalize.glsl b/ffmpeg/transitions/glsl/hexagonalize.glsl new file mode 100644 index 0000000..89de5f3 --- /dev/null +++ b/ffmpeg/transitions/glsl/hexagonalize.glsl @@ -0,0 +1,74 @@ +// Author: Fernando Kuteken +// License: MIT +// Hexagonal math from: http://www.redblobgames.com/grids/hexagons/ + +uniform int steps; // = 50; +uniform float horizontalHexagons; //= 20; + +struct Hexagon { + float q; + float r; + float s; +}; + +Hexagon createHexagon(float q, float r){ + Hexagon hex; + hex.q = q; + hex.r = r; + hex.s = -q - r; + return hex; +} + +Hexagon roundHexagon(Hexagon hex){ + + float q = floor(hex.q + 0.5); + float r = floor(hex.r + 0.5); + float s = floor(hex.s + 0.5); + + float deltaQ = abs(q - hex.q); + float deltaR = abs(r - hex.r); + float deltaS = abs(s - hex.s); + + if (deltaQ > deltaR && deltaQ > deltaS) + q = -r - s; + else if (deltaR > deltaS) + r = -q - s; + else + s = -q - r; + + return createHexagon(q, r); +} + +Hexagon hexagonFromPoint(vec2 point, float size) { + + point.y /= ratio; + point = (point - 0.5) / size; + + float q = (sqrt(3.0) / 3.0) * point.x + (-1.0 / 3.0) * point.y; + float r = 0.0 * point.x + 2.0 / 3.0 * point.y; + + Hexagon hex = createHexagon(q, r); + return roundHexagon(hex); + +} + +vec2 pointFromHexagon(Hexagon hex, float size) { + + float x = (sqrt(3.0) * hex.q + (sqrt(3.0) / 2.0) * hex.r) * size + 0.5; + float y = (0.0 * hex.q + (3.0 / 2.0) * hex.r) * size + 0.5; + + return vec2(x, y * ratio); +} + +vec4 transition (vec2 uv) { + + float dist = 2.0 * min(progress, 1.0 - progress); + dist = steps > 0 ? ceil(dist * float(steps)) / float(steps) : dist; + + float size = (sqrt(3.0) / 3.0) * dist / horizontalHexagons; + + vec2 point = dist > 0.0 ? 
pointFromHexagon(hexagonFromPoint(uv, size), size) : uv; + + return mix(getFromColor(point), getToColor(point), progress); + +} diff --git a/ffmpeg/transitions/glsl/kaleidoscope.glsl b/ffmpeg/transitions/glsl/kaleidoscope.glsl new file mode 100644 index 0000000..6dfbc4a --- /dev/null +++ b/ffmpeg/transitions/glsl/kaleidoscope.glsl @@ -0,0 +1,22 @@ +// Author: nwoeanhinnogaehr +// License: MIT + +uniform float speed; // = 1.0; +uniform float angle; // = 1.0; +uniform float power; // = 1.5; + +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy; + vec2 q = p; + float t = pow(progress, power)*speed; + p = p -0.5; + for (int i = 0; i < 7; i++) { + p = vec2(sin(t)*p.x + cos(t)*p.y, sin(t)*p.y - cos(t)*p.x); + t += angle; + p = abs(mod(p, 2.0) - 1.0); + } + abs(mod(p, 1.0)); + return mix( + mix(getFromColor(q), getToColor(q), progress), + mix(getFromColor(p), getToColor(p), progress), 1.0 - 2.0*abs(progress - 0.5)); +} diff --git a/ffmpeg/transitions/glsl/luma.glsl b/ffmpeg/transitions/glsl/luma.glsl new file mode 100644 index 0000000..c44bca3 --- /dev/null +++ b/ffmpeg/transitions/glsl/luma.glsl @@ -0,0 +1,12 @@ +// Author: gre +// License: MIT + +uniform sampler2D luma; + +vec4 transition(vec2 uv) { + return mix( + getToColor(uv), + getFromColor(uv), + step(progress, texture2D(luma, uv).r) + ); +} diff --git a/ffmpeg/transitions/glsl/luminance_melt.glsl b/ffmpeg/transitions/glsl/luminance_melt.glsl new file mode 100644 index 0000000..02666f8 --- /dev/null +++ b/ffmpeg/transitions/glsl/luminance_melt.glsl @@ -0,0 +1,126 @@ +// Author: 0gust1 +// License: MIT +//My own first transition — based on crosshatch code (from pthrasher), using simplex noise formula (copied and pasted) +//-> cooler with high contrasted images (isolated dark subject on light background f.e.) +//TODO : try to rebase it on DoomTransition (from zeh)? +//optimizations : +//luminance (see http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color#answer-596241) +// Y = (R+R+B+G+G+G)/6 +//or Y = (R+R+R+B+G+G+G+G)>>3 + + +//direction of movement : 0 : up, 1, down +uniform bool direction; // = 1 +//luminance threshold +uniform float l_threshold; // = 0.8 +//does the movement takes effect above or below luminance threshold ? +uniform bool above; // = false + + +//Random function borrowed from everywhere +float rand(vec2 co){ + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + + +// Simplex noise : +// Description : Array and textureless GLSL 2D simplex noise function. +// Author : Ian McEwan, Ashima Arts. +// Maintainer : ijm +// Lastmod : 20110822 (ijm) +// License : MIT +// 2011 Ashima Arts. All rights reserved. +// Distributed under the MIT License. See LICENSE file. +// https://github.com/ashima/webgl-noise +// + +vec3 mod289(vec3 x) { + return x - floor(x * (1.0 / 289.0)) * 289.0; +} + +vec2 mod289(vec2 x) { + return x - floor(x * (1.0 / 289.0)) * 289.0; +} + +vec3 permute(vec3 x) { + return mod289(((x*34.0)+1.0)*x); +} + +float snoise(vec2 v) + { + const vec4 C = vec4(0.211324865405187, // (3.0-sqrt(3.0))/6.0 + 0.366025403784439, // 0.5*(sqrt(3.0)-1.0) + -0.577350269189626, // -1.0 + 2.0 * C.x + 0.024390243902439); // 1.0 / 41.0 +// First corner + vec2 i = floor(v + dot(v, C.yy) ); + vec2 x0 = v - i + dot(i, C.xx); + +// Other corners + vec2 i1; + //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0 + //i1.y = 1.0 - i1.x; + i1 = (x0.x > x0.y) ? 
vec2(1.0, 0.0) : vec2(0.0, 1.0); + // x0 = x0 - 0.0 + 0.0 * C.xx ; + // x1 = x0 - i1 + 1.0 * C.xx ; + // x2 = x0 - 1.0 + 2.0 * C.xx ; + vec4 x12 = x0.xyxy + C.xxzz; + x12.xy -= i1; + +// Permutations + i = mod289(i); // Avoid truncation effects in permutation + vec3 p = permute( permute( i.y + vec3(0.0, i1.y, 1.0 )) + + i.x + vec3(0.0, i1.x, 1.0 )); + + vec3 m = max(0.5 - vec3(dot(x0,x0), dot(x12.xy,x12.xy), dot(x12.zw,x12.zw)), 0.0); + m = m*m ; + m = m*m ; + +// Gradients: 41 points uniformly over a line, mapped onto a diamond. +// The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287) + + vec3 x = 2.0 * fract(p * C.www) - 1.0; + vec3 h = abs(x) - 0.5; + vec3 ox = floor(x + 0.5); + vec3 a0 = x - ox; + +// Normalise gradients implicitly by scaling m +// Approximation of: m *= inversesqrt( a0*a0 + h*h ); + m *= 1.79284291400159 - 0.85373472095314 * ( a0*a0 + h*h ); + +// Compute final noise value at P + vec3 g; + g.x = a0.x * x0.x + h.x * x0.y; + g.yz = a0.yz * x12.xz + h.yz * x12.yw; + return 130.0 * dot(m, g); +} + +// Simplex noise -- end + +float luminance(vec4 color){ + //(0.299*R + 0.587*G + 0.114*B) + return color.r*0.299+color.g*0.587+color.b*0.114; +} + +vec2 center = vec2(1.0, direction); + +vec4 transition(vec2 uv) { + vec2 p = uv.xy / vec2(1.0).xy; + if (progress == 0.0) { + return getFromColor(p); + } else if (progress == 1.0) { + return getToColor(p); + } else { + float x = progress; + float dist = distance(center, p)- progress*exp(snoise(vec2(p.x, 0.0))); + float r = x - rand(vec2(p.x, 0.1)); + float m; + if(above){ + m = dist <= r && luminance(getFromColor(p))>l_threshold ? 1.0 : (progress*progress*progress); + } + else{ + m = dist <= r && luminance(getFromColor(p))<l_threshold ? 1.0 : (progress*progress*progress); + } + return mix(getFromColor(p), getToColor(p), m); + } +} diff --git a/ffmpeg/transitions/glsl/pixelize.glsl b/ffmpeg/transitions/glsl/pixelize.glsl new file mode 100644 --- /dev/null +++ b/ffmpeg/transitions/glsl/pixelize.glsl @@ -0,0 +1,15 @@ +// Author: gre +// License: MIT +// forked from https://gist.github.com/benraziel/c528607361d90a072e98 + +uniform ivec2 squaresMin; // = ivec2(20) +uniform int steps; // = 50 + +float d = min(progress, 1.0 - progress); +float dist = steps>0 ? ceil(d * float(steps)) / float(steps) : d; +vec2 squareSize = 2.0 * dist / vec2(squaresMin); + +vec4 transition(vec2 uv) { + vec2 p = dist>0.0 ?
(floor(uv / squareSize) + 0.5) * squareSize : uv; + return mix(getFromColor(p), getToColor(p), progress); +} diff --git a/ffmpeg/transitions/glsl/polar_function.glsl b/ffmpeg/transitions/glsl/polar_function.glsl new file mode 100644 index 0000000..4857b25 --- /dev/null +++ b/ffmpeg/transitions/glsl/polar_function.glsl @@ -0,0 +1,20 @@ +// Author: Fernando Kuteken +// License: MIT + +#define PI 3.14159265359 + +uniform int segments; // = 5; + +vec4 transition (vec2 uv) { + + float angle = atan(uv.y - 0.5, uv.x - 0.5) - 0.5 * PI; + float normalized = (angle + 1.5 * PI) * (2.0 * PI); + + float radius = (cos(float(segments) * angle) + 4.0) / 4.0; + float difference = length(uv - vec2(0.5, 0.5)); + + if (difference > radius * progress) + return getFromColor(uv); + else + return getToColor(uv); +} diff --git a/ffmpeg/transitions/glsl/randomNoisex.glsl b/ffmpeg/transitions/glsl/randomNoisex.glsl new file mode 100644 index 0000000..a16b37e --- /dev/null +++ b/ffmpeg/transitions/glsl/randomNoisex.glsl @@ -0,0 +1,16 @@ +// Author:towrabbit +// License: MIT + +float random (vec2 st) { + return fract(sin(dot(st.xy,vec2(12.9898,78.233)))*43758.5453123); +} +vec4 transition (vec2 uv) { + vec4 leftSide = getFromColor(uv); + vec2 uv1 = uv; + vec2 uv2 = uv; + float uvz = floor(random(uv1)+progress); + vec4 rightSide = getToColor(uv); + float p = progress*2.0; + return mix(leftSide,rightSide,uvz); + return leftSide * ceil(uv.x*2.-p) + rightSide * ceil(-uv.x*2.+p); +} diff --git a/ffmpeg/transitions/glsl/randomsquares.glsl b/ffmpeg/transitions/glsl/randomsquares.glsl new file mode 100644 index 0000000..85d766c --- /dev/null +++ b/ffmpeg/transitions/glsl/randomsquares.glsl @@ -0,0 +1,15 @@ +// Author: gre +// License: MIT + +uniform ivec2 size; // = ivec2(10, 10) +uniform float smoothness; // = 0.5 + +float rand (vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + +vec4 transition(vec2 p) { + float r = rand(floor(vec2(size) * p)); + float m = smoothstep(0.0, -smoothness, r - (progress * (1.0 + smoothness))); + return mix(getFromColor(p), getToColor(p), m); +} diff --git a/ffmpeg/transitions/glsl/ripple.glsl b/ffmpeg/transitions/glsl/ripple.glsl new file mode 100644 index 0000000..2d59766 --- /dev/null +++ b/ffmpeg/transitions/glsl/ripple.glsl @@ -0,0 +1,15 @@ +// Author: gre +// License: MIT +uniform float amplitude; // = 100.0 +uniform float speed; // = 50.0 + +vec4 transition (vec2 uv) { + vec2 dir = uv - vec2(.5); + float dist = length(dir); + vec2 offset = dir * (sin(progress * dist * amplitude - progress * speed) + .5) / 30.; + return mix( + getFromColor(uv + offset), + getToColor(uv), + smoothstep(0.2, 1.0, progress) + ); +} diff --git a/ffmpeg/transitions/glsl/rotate_scale_fade.glsl b/ffmpeg/transitions/glsl/rotate_scale_fade.glsl new file mode 100644 index 0000000..5a96e52 --- /dev/null +++ b/ffmpeg/transitions/glsl/rotate_scale_fade.glsl @@ -0,0 +1,32 @@ +// Author: Fernando Kuteken +// License: MIT + +#define PI 3.14159265359 + +uniform vec2 center; // = vec2(0.5, 0.5); +uniform float rotations; // = 1; +uniform float scale; // = 8; +uniform vec4 backColor; // = vec4(0.15, 0.15, 0.15, 1.0); + +vec4 transition (vec2 uv) { + + vec2 difference = uv - center; + vec2 dir = normalize(difference); + float dist = length(difference); + + float angle = 2.0 * PI * rotations * progress; + + float c = cos(angle); + float s = sin(angle); + + float currentScale = mix(scale, 1.0, 2.0 * abs(progress - 0.5)); + + vec2 rotatedDir = vec2(dir.x * c - dir.y * s, dir.x * s + dir.y * c); + 
vec2 rotatedUv = center + rotatedDir * dist / currentScale; + + if (rotatedUv.x < 0.0 || rotatedUv.x > 1.0 || + rotatedUv.y < 0.0 || rotatedUv.y > 1.0) + return backColor; + + return mix(getFromColor(rotatedUv), getToColor(rotatedUv), progress); +} diff --git a/ffmpeg/transitions/glsl/squareswire.glsl b/ffmpeg/transitions/glsl/squareswire.glsl new file mode 100644 index 0000000..43a0e0b --- /dev/null +++ b/ffmpeg/transitions/glsl/squareswire.glsl @@ -0,0 +1,20 @@ +// Author: gre +// License: MIT + +uniform ivec2 squares;// = ivec2(10,10) +uniform vec2 direction;// = vec2(1.0, -0.5) +uniform float smoothness; // = 1.6 + +const vec2 center = vec2(0.5, 0.5); +vec4 transition (vec2 p) { + vec2 v = normalize(direction); + v /= abs(v.x)+abs(v.y); + float d = v.x * center.x + v.y * center.y; + float offset = smoothness; + float pr = smoothstep(-offset, 0.0, v.x * p.x + v.y * p.y - (d-0.5+progress*(1.+offset))); + vec2 squarep = fract(p*vec2(squares)); + vec2 squaremin = vec2(pr/2.0); + vec2 squaremax = vec2(1.0 - pr/2.0); + float a = (1.0 - step(progress, 0.0)) * step(squaremin.x, squarep.x) * step(squaremin.y, squarep.y) * step(squarep.x, squaremax.x) * step(squarep.y, squaremax.y); + return mix(getFromColor(p), getToColor(p), a); +} diff --git a/ffmpeg/transitions/glsl/squeeze.glsl b/ffmpeg/transitions/glsl/squeeze.glsl new file mode 100644 index 0000000..8f71c9c --- /dev/null +++ b/ffmpeg/transitions/glsl/squeeze.glsl @@ -0,0 +1,19 @@ +// Author: gre +// License: MIT + +uniform float colorSeparation; // = 0.04 + +vec4 transition (vec2 uv) { + float y = 0.5 + (uv.y-0.5) / (1.0-progress); + if (y < 0.0 || y > 1.0) { + return getToColor(uv); + } + else { + vec2 fp = vec2(uv.x, y); + vec2 off = progress * vec2(0.0, colorSeparation); + vec4 c = getFromColor(fp); + vec4 cn = getFromColor(fp - off); + vec4 cp = getFromColor(fp + off); + return vec4(cn.r, c.g, cp.b, c.a); + } +} diff --git a/ffmpeg/transitions/glsl/swap.glsl b/ffmpeg/transitions/glsl/swap.glsl new file mode 100644 index 0000000..763dc66 --- /dev/null +++ b/ffmpeg/transitions/glsl/swap.glsl @@ -0,0 +1,59 @@ +// Author: gre +// License: MIT +// General parameters +uniform float reflection; // = 0.4 +uniform float perspective; // = 0.2 +uniform float depth; // = 3.0 + +const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); +const vec2 boundMin = vec2(0.0, 0.0); +const vec2 boundMax = vec2(1.0, 1.0); + +bool inBounds (vec2 p) { + return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); +} + +vec2 project (vec2 p) { + return p * vec2(1.0, -1.2) + vec2(0.0, -0.02); +} + +vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) { + vec4 c = black; + pfr = project(pfr); + if (inBounds(pfr)) { + c += mix(black, getFromColor(pfr), reflection * mix(1.0, 0.0, pfr.y)); + } + pto = project(pto); + if (inBounds(pto)) { + c += mix(black, getToColor(pto), reflection * mix(1.0, 0.0, pto.y)); + } + return c; +} + +vec4 transition(vec2 p) { + vec2 pfr, pto = vec2(-1.); + + float size = mix(1.0, depth, progress); + float persp = perspective * progress; + pfr = (p + vec2(-0.0, -0.5)) * vec2(size/(1.0-perspective*progress), size/(1.0-size*persp*p.x)) + vec2(0.0, 0.5); + + size = mix(1.0, depth, 1.-progress); + persp = perspective * (1.-progress); + pto = (p + vec2(-1.0, -0.5)) * vec2(size/(1.0-perspective*(1.0-progress)), size/(1.0-size*persp*(0.5-p.x))) + vec2(1.0, 0.5); + + if (progress < 0.5) { + if (inBounds(pfr)) { + return getFromColor(pfr); + } + if (inBounds(pto)) { + return getToColor(pto); + } + } + if (inBounds(pto)) { + return getToColor(pto); + } + if 
(inBounds(pfr)) { + return getFromColor(pfr); + } + return bgColor(p, pfr, pto); +} diff --git a/ffmpeg/transitions/glsl/tangentMotionBlur.glsl b/ffmpeg/transitions/glsl/tangentMotionBlur.glsl new file mode 100644 index 0000000..1f93399 --- /dev/null +++ b/ffmpeg/transitions/glsl/tangentMotionBlur.glsl @@ -0,0 +1,139 @@ + +// License: MIT +// Author: chenkai +// ported from https://codertw.com/%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80/671116/ + +float rand (vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + +// motion blur for texture from +vec4 motionBlurFrom(vec2 _st, vec2 speed) { + vec2 texCoord = _st.xy / vec2(1.0).xy; + vec3 color = vec3(0.0); + float total = 0.0; + float offset = rand(_st); + for (float t = 0.0; t <= 20.0; t++) { + float percent = (t + offset) / 20.0; + float weight = 4.0 * (percent - percent * percent); + vec2 newuv = texCoord + speed * percent; + newuv = fract(newuv); + color += getFromColor(newuv).rgb * weight; + total += weight; + } + return vec4(color / total, 1.0); +} + +// motion blur for texture to +vec4 motionBlurTo(vec2 _st, vec2 speed) { + vec2 texCoord = _st.xy / vec2(1.0).xy; + vec3 color = vec3(0.0); + float total = 0.0; + float offset = rand(_st); + for (float t = 0.0; t <= 20.0; t++) { + float percent = (t + offset) / 20.0; + float weight = 4.0 * (percent - percent * percent); + vec2 newuv = texCoord + speed * percent; + newuv = fract(newuv); + color += getToColor(newuv).rgb * weight; + total += weight; + } + return vec4(color / total, 1.0); +} + + +// bezier in gpu +float A(float aA1, float aA2) { + return 1.0 - 3.0 * aA2 + 3.0 * aA1; +} +float B(float aA1, float aA2) { + return 3.0 * aA2 - 6.0 * aA1; +} +float C(float aA1) { + return 3.0 * aA1; +} +float GetSlope(float aT, float aA1, float aA2) { + return 3.0 * A(aA1, aA2)*aT*aT + 2.0 * B(aA1, aA2) * aT + C(aA1); +} +float CalcBezier(float aT, float aA1, float aA2) { + return ((A(aA1, aA2)*aT + B(aA1, aA2))*aT + C(aA1))*aT; +} +float GetTForX(float aX, float mX1, float mX2) { + // iteration to solve + float aGuessT = aX; + for (int i = 0; i < 4; ++i) { + float currentSlope = GetSlope(aGuessT, mX1, mX2); + if (currentSlope == 0.0) return aGuessT; + float currentX = CalcBezier(aGuessT, mX1, mX2) - aX; + aGuessT -= currentX / currentSlope; + } + return aGuessT; +} +float KeySpline(float aX, float mX1, float mY1, float mX2, float mY2) { + if (mX1 == mY1 && mX2 == mY2) return aX; // linear + return CalcBezier(GetTForX(aX, mX1, mX2), mY1, mY2); // x to t, t to y +} + +// norm distribution +float normpdf(float x) { + return exp(-20.*pow(x-.5,2.)); +} + +vec2 rotateUv(vec2 uv, float angle, vec2 anchor, float zDirection) { + uv = uv - anchor; // anchor to origin + float s = sin(angle); + float c = cos(angle); + mat2 m = mat2(c, -s, s, c); + uv = m * uv; + uv += anchor; // anchor back + return uv; +} + + + +vec4 transition (vec2 uv) { + + vec2 iResolution = vec2(100.0, 100.0); // screen size + + vec2 myst = uv; + float ratio = iResolution.x / iResolution.y; // screen ratio + float animationTime = progress; //getAnimationTime(); + float easingTime = KeySpline(animationTime, .68,.01,.17,.98); + float blur = normpdf(easingTime); + float r = 0.; + float rotation = 180./180.*3.14159; + if (easingTime <= .5) { + r = rotation * easingTime; + } else { + r = -rotation + rotation * easingTime; + } + + // rotation for current frame + vec2 mystCurrent = myst; + mystCurrent.y *= 1./ratio; + mystCurrent = rotateUv(mystCurrent, r, vec2(1., 0.), -1.); + mystCurrent.y *= ratio; + + // frame 
timeInterval by fps=30 + float timeInterval = 0.0167*2.0; + if (easingTime <= .5) { + r = rotation * (easingTime+timeInterval); + } else { + r = -rotation + rotation * (easingTime+timeInterval); + } + + // rotation for next frame + vec2 mystNext = myst; + mystNext.y *= 1./ratio; + mystNext = rotateUv(mystNext, r, vec2(1., 0.), -1.); + mystNext.y *= ratio; + + // get speed at tagent direction + vec2 speed = (mystNext - mystCurrent) / timeInterval * blur * 0.5; + if (easingTime <= .5) { + return motionBlurFrom(mystCurrent, speed); + } else { + return motionBlurTo(mystCurrent, speed); + } +} + diff --git a/ffmpeg/transitions/glsl/undulatingBurnOut.glsl b/ffmpeg/transitions/glsl/undulatingBurnOut.glsl new file mode 100644 index 0000000..65e1382 --- /dev/null +++ b/ffmpeg/transitions/glsl/undulatingBurnOut.glsl @@ -0,0 +1,47 @@ +// License: MIT +// Author: pthrasher +// adapted by gre from https://gist.github.com/pthrasher/8e6226b215548ba12734 + +uniform float smoothness; // = 0.03 +uniform vec2 center; // = vec2(0.5) +uniform vec3 color; // = vec3(0.0) + +const float M_PI = 3.14159265358979323846; + +float quadraticInOut(float t) { + float p = 2.0 * t * t; + return t < 0.5 ? p : -p + (4.0 * t) - 1.0; +} + +float getGradient(float r, float dist) { + float d = r - dist; + return mix( + smoothstep(-smoothness, 0.0, r - dist * (1.0 + smoothness)), + -1.0 - step(0.005, d), + step(-0.005, d) * step(d, 0.01) + ); +} + +float getWave(vec2 p){ + vec2 _p = p - center; // offset from center + float rads = atan(_p.y, _p.x); + float degs = degrees(rads) + 180.0; + vec2 range = vec2(0.0, M_PI * 30.0); + vec2 domain = vec2(0.0, 360.0); + float ratio = (M_PI * 30.0) / 360.0; + degs = degs * ratio; + float x = progress; + float magnitude = mix(0.02, 0.09, smoothstep(0.0, 1.0, x)); + float offset = mix(40.0, 30.0, smoothstep(0.0, 1.0, x)); + float ease_degs = quadraticInOut(sin(degs)); + float deg_wave_pos = (ease_degs * magnitude) * sin(x * offset); + return x + deg_wave_pos; +} + +vec4 transition(vec2 p) { + float dist = distance(center, p); + float m = getGradient(getWave(p), dist); + vec4 cfrom = getFromColor(p); + vec4 cto = getToColor(p); + return mix(mix(cfrom, cto, m), mix(cfrom, vec4(color, 1.0), 0.75), step(m, -2.0)); +} diff --git a/ffmpeg/transitions/glsl/wind.glsl b/ffmpeg/transitions/glsl/wind.glsl new file mode 100644 index 0000000..f1aa824 --- /dev/null +++ b/ffmpeg/transitions/glsl/wind.glsl @@ -0,0 +1,19 @@ +// Author: gre +// License: MIT + +// Custom parameters +uniform float size; // = 0.2 + +float rand (vec2 co) { + return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); +} + +vec4 transition (vec2 uv) { + float r = rand(vec2(0, uv.y)); + float m = smoothstep(0.0, -size, uv.x*(1.0-size) + size*r - (progress * (1.0 + size))); + return mix( + getFromColor(uv), + getToColor(uv), + m + ); +} diff --git a/ffmpeg/transitions/glsl/windowblinds.glsl b/ffmpeg/transitions/glsl/windowblinds.glsl new file mode 100644 index 0000000..ad8f8b1 --- /dev/null +++ b/ffmpeg/transitions/glsl/windowblinds.glsl @@ -0,0 +1,15 @@ +// Author: Fabien Benetou +// License: MIT + +vec4 transition (vec2 uv) { + float t = progress; + + if (mod(floor(uv.y*100.*progress),2.)==0.) 
+ t*=2.-.5; + + return mix( + getFromColor(uv), + getToColor(uv), + mix(t, progress, smoothstep(0.8, 1.0, progress)) + ); +} diff --git a/ffmpeg/transitions/glsl/windowslice.glsl b/ffmpeg/transitions/glsl/windowslice.glsl new file mode 100644 index 0000000..20bf5d2 --- /dev/null +++ b/ffmpeg/transitions/glsl/windowslice.glsl @@ -0,0 +1,11 @@ +// Author: gre +// License: MIT + +uniform float count; // = 10.0 +uniform float smoothness; // = 0.5 + +vec4 transition (vec2 p) { + float pr = smoothstep(-smoothness, 0.0, p.x - progress * (1.0 + smoothness)); + float s = step(pr, fract(count * p.x)); + return mix(getFromColor(p), getToColor(p), s); +} diff --git a/ffmpeg/transitions/glsl/wipeDown.glsl b/ffmpeg/transitions/glsl/wipeDown.glsl new file mode 100644 index 0000000..8f979e1 --- /dev/null +++ b/ffmpeg/transitions/glsl/wipeDown.glsl @@ -0,0 +1,9 @@ +// Author: Jake Nelson +// License: MIT + +vec4 transition(vec2 uv) { + vec2 p=uv.xy/vec2(1.0).xy; + vec4 a=getFromColor(p); + vec4 b=getToColor(p); + return mix(a, b, step(1.0-p.y,progress)); +} diff --git a/ffmpeg/transitions/glsl/wipeLeft.glsl b/ffmpeg/transitions/glsl/wipeLeft.glsl new file mode 100644 index 0000000..d38c5e2 --- /dev/null +++ b/ffmpeg/transitions/glsl/wipeLeft.glsl @@ -0,0 +1,9 @@ +// Author: Jake Nelson +// License: MIT + +vec4 transition(vec2 uv) { + vec2 p=uv.xy/vec2(1.0).xy; + vec4 a=getFromColor(p); + vec4 b=getToColor(p); + return mix(a, b, step(1.0-p.x,progress)); +} diff --git a/ffmpeg/transitions/glsl/wipeRight.glsl b/ffmpeg/transitions/glsl/wipeRight.glsl new file mode 100644 index 0000000..54e06af --- /dev/null +++ b/ffmpeg/transitions/glsl/wipeRight.glsl @@ -0,0 +1,9 @@ +// Author: Jake Nelson +// License: MIT + +vec4 transition(vec2 uv) { + vec2 p=uv.xy/vec2(1.0).xy; + vec4 a=getFromColor(p); + vec4 b=getToColor(p); + return mix(a, b, step(0.0+p.x,progress)); +} diff --git a/ffmpeg/transitions/glsl/wipeUp.glsl b/ffmpeg/transitions/glsl/wipeUp.glsl new file mode 100644 index 0000000..ddd0edc --- /dev/null +++ b/ffmpeg/transitions/glsl/wipeUp.glsl @@ -0,0 +1,9 @@ +// Author: Jake Nelson +// License: MIT + +vec4 transition(vec2 uv) { + vec2 p=uv.xy/vec2(1.0).xy; + vec4 a=getFromColor(p); + vec4 b=getToColor(p); + return mix(a, b, step(0.0+p.y,progress)); +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..c96efb4 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +graphviz~=0.16 +matplotlib~=3.2.2 +numpy~=1.19.5 +pillow~=8.0.1 +project-pkgs~=1.0.0 +setuptools~=49.2.1 +tqdm~=4.47.0 +psutil~=5.8.0 \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..a4724ca --- /dev/null +++ b/setup.py @@ -0,0 +1,48 @@ +''' +Date: 2021-02-28 17:06:01 +LastEditors: Rustle Karl +LastEditTime: 2021.05.04 13:03:28 +''' +import os.path + +from setuptools import setup + +from ffmpeg import __version__ + +# What packages are required for this module to be executed? +requires = [ + 'graphviz', + 'project-pkgs', + 'tqdm', +] + +# Import the README and use it as the long-description. 
+cwd = os.path.abspath(os.path.dirname(__file__)) +with open(os.path.join(cwd, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + +setup( + name='ffmpeg-generator', + packages=[ + 'ffmpeg', + 'ffmpeg.expression', + 'ffmpeg.filters', + 'ffmpeg.tools', + 'ffmpeg.transitions', + ], + version=__version__, + license='MIT', + author='Rustle Karl', + author_email='fu.jiawei@outlook.com', + description='Python bindings for FFmpeg - with almost all filters support, even `gltransition` filter.', + long_description=long_description, + long_description_content_type='text/markdown', + keywords=['ffmpeg', 'ffprobe', 'ffplay'], + classifiers=[ + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.8', + ], + install_requires=requires, +)