Merge branch 'xoxfaby:root' into root

This commit is contained in:
RomanNum3ral 2024-10-27 10:37:27 -04:00 committed by GitHub
commit c785cb8f01
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
239 changed files with 31258 additions and 3381 deletions

View File

@ -1,5 +1,5 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# Copyright (C) 2017-2024 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
# top-most EditorConfig file
@ -16,3 +16,6 @@ indent_size = 4
[*.yml]
indent_style = space
indent_size = 2
[*.md]
trim_trailing_whitespace = false

View File

@ -7,12 +7,6 @@ title: "REPLACE ME"
description: "This form is for bug and crash reports only, primarily used by developers. Abuse of this form will lead to a permanent interaction ban."
labels: ["bug"]
body:
- type: textarea
attributes:
label: "OBS Studio Logs"
description: "Paste the content or attach the log files from OBS Studio here. In the event of a crash, paste or attach both the crash log and the normal log file."
validations:
required: true
- type: textarea
attributes:
label: "Current and Expected Behavior"
@ -25,6 +19,12 @@ body:
description: "What steps are required to consistently reproduce the bug/crash/freeze?"
validations:
required: true
- type: textarea
attributes:
label: "Log files & Crash Dumps"
description: "Paste the content or attach the log files from OBS Studio here. In the event of a crash, paste or attach both the crash log and the normal log file."
validations:
required: false
- type: textarea
attributes:
label: "Any additional Information we need to know?"

View File

@ -1,5 +1,5 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# Copyright (C) 2019-2024 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
name: Build
@ -19,337 +19,71 @@ concurrency:
cancel-in-progress: true
env:
CACHE_VERSION: "2022-12-02"
CACHE_VERSION: "2024-01-25"
jobs:
windows:
build:
strategy:
fail-fast: false
matrix:
runner: [ "windows-2022" ]
name: [ "Windows" ]
compiler: [ "MSVC", "Clang" ]
qt: [ 6 ]
runner: [ "windows-2022", "macos-12", "ubuntu-22.04" ]
compiler: [ "MSVC", "GCC-12", "Clang-17", "AppleClang" ]
include:
- compiler: "MSVC"
- runner: "windows-2022"
compiler: "MSVC"
platform: "windows"
name: "Windows"
CMAKE_SYSTEM_VERSION: "10.0.20348.0"
CMAKE_GENERATOR: "Visual Studio 17 2022"
CMAKE_GENERATOR_PLATFORM: "x64"
- compiler: "Clang"
CMAKE_SYSTEM_VERSION: "10.0.20348.0"
CMAKE_GENERATOR: "Visual Studio 17 2022"
CMAKE_GENERATOR_TOOLSET: "ClangCL"
CMAKE_GENERATOR_PLATFORM: "x64"
runs-on: "${{ matrix.runner }}"
name: "${{ matrix.name }} (${{ matrix.compiler }})"
env:
CMAKE_GENERATOR: "${{ matrix.CMAKE_GENERATOR }}"
CMAKE_GENERATOR_PLATFORM: "${{ matrix.CMAKE_GENERATOR_PLATFORM }}"
CMAKE_GENERATOR_TOOLSET: "${{ matrix.CMAKE_GENERATOR_TOOLSET }}"
CMAKE_SYSTEM_VERSION: "${{ matrix.CMAKE_SYSTEM_VERSION }}"
steps:
- name: "Clone"
uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
- name: "Gather Information"
id: info
shell: bash
run: |
# Define buildspec file
buildspec="${{ github.workspace }}/third-party/obs-studio/buildspec.json"
# Prebuilt Dependencies Version
IFS=$'\n' buildspecdata=($(node tools/buildspec.js "${buildspec}" "prebuilt" "windows-x64"))
echo "obs_deps_version=${buildspecdata[0]}" >> "$GITHUB_ENV"
echo "obs_deps_hash=${buildspecdata[1]}" >> "$GITHUB_ENV"
echo "obs_deps_url=${buildspecdata[2]}" >> "$GITHUB_ENV"
# Qt Version
IFS=$'\n' buildspecdata=($(node tools/buildspec.js "${buildspec}" "qt${{ matrix.qt }}" "windows-x64"))
echo "qt_version=${buildspecdata[0]}" >> "$GITHUB_ENV"
echo "qt_hash=${buildspecdata[1]}" >> "$GITHUB_ENV"
echo "qt_url=${buildspecdata[2]}" >> "$GITHUB_ENV"
# libOBS Version
echo "obs_version=$(cd "${{ github.workspace }}/third-party/obs-studio" && git describe --tags --long)" >> "$GITHUB_ENV"
- name: "Dependency: Qt (Cache)"
id: qt-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/qt"
key: "qt${{ env.qt_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: Qt"
id: qt
if: ${{ steps.qt-cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
curl --retry 5 --retry-delay 30 -jLo /tmp/qt.zip "${{ env.qt_url }}"
if [[ ! -f "${{ github.workspace }}/build/qt" ]]; then mkdir -p "${{ github.workspace }}/build/qt"; fi
7z x -y -o"${{ github.workspace }}/build/qt" -- "/tmp/qt.zip"
- name: "Dependency: Prebuilt OBS Studio Dependencies (Cache)"
id: obsdeps-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/obsdeps"
key: "obsdeps${{ env.obs_deps_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: Prebuilt OBS Studio Dependencies"
id: obsdeps
if: ${{ steps.obsdeps-cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
curl --retry 5 --retry-delay 30 -jLo /tmp/obsdeps.zip "${{ env.obs_deps_url }}"
if [[ ! -f "${{ github.workspace }}/build/obsdeps" ]]; then mkdir -p "${{ github.workspace }}/build/obsdeps"; fi
7z x -y -o"${{ github.workspace }}/build/obsdeps" -- "/tmp/obsdeps.zip"
- name: "Dependency: OBS Libraries (Cache)"
id: obs-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/obs"
key: "obs${{ env.obs_version }}-${{ matrix.runner }}_${{ matrix.compiler }}-obsdeps${{ env.obs_deps_hash }}-qt${{ env.qt_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: OBS Libraries"
id: obs
if: ${{ steps.obs-cache.outputs.cache-hit != 'true' }}
env:
# obs-studio does not support ClangCL
CMAKE_GENERATOR_TOOLSET: ""
shell: bash
run: |
# Apply patches to obs-studio
pushd "${{ github.workspace }}/third-party/obs-studio" > /dev/null
for f in ../../patches/obs-studio/*.patch; do
echo "Applying patch '${f}''..."
[ -e "$f" ] || continue
git apply "$f"
done
popd > /dev/null
# Build obs-studio
cmake \
-S "${{ github.workspace }}/third-party/obs-studio" \
-B "${{ github.workspace }}/build/obs" \
-DCMAKE_SYSTEM_VERSION="${{ env.CMAKE_SYSTEM_VERSION }}" \
-DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/build/obs/install" \
-DCMAKE_PREFIX_PATH="${{ github.workspace }}/build/obsdeps;${{ github.workspace }}/build/qt" \
-DENABLE_PLUGINS=OFF \
-DENABLE_UI=OFF \
-DENABLE_SCRIPTING=OFF
cmake \
--build "${{ github.workspace }}/build/obs" \
--config RelWithDebInfo \
--target obs-frontend-api
cmake \
--install "${{ github.workspace }}/build/obs" \
--config RelWithDebInfo \
--component obs_libraries
- name: "Configure"
continue-on-error: true
shell: bash
run: |
cmake \
-S "${{ github.workspace }}" \
-B "${{ github.workspace }}/build/ci" \
-DCMAKE_SYSTEM_VERSION="${{ env.CMAKE_SYSTEM_VERSION }}" \
-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON \
-Dlibobs_DIR="${{ github.workspace }}/build/obs/install" \
-DQt${{ matrix.qt }}_DIR="${{ github.workspace }}/build/qt" \
-DFFmpeg_DIR="${{ github.workspace }}/build/obsdeps" \
-DCURL_DIR="${{ github.workspace }}/build/obsdeps"
- name: "Build: Debug"
continue-on-error: true
shell: bash
env:
CMAKE_BUILD_TYPE: "Debug"
run: |
cmake --build "build/ci" --config ${{ env.CMAKE_BUILD_TYPE }} --target StreamFX
- name: "Build: Release"
shell: bash
env:
CMAKE_BUILD_TYPE: "RelWithDebInfo"
run: |
cmake --build "build/ci" --config ${{ env.CMAKE_BUILD_TYPE }} --target StreamFX
macos:
strategy:
fail-fast: false
matrix:
runner: [ "macos-12" ]
compiler: [ "Clang" ]
qt: [ 6 ]
include:
- compiler: "Clang"
- runner: "macos-12"
compiler: "AppleClang"
platform: "macos"
name: "MacOS"
CMAKE_GENERATOR: "Xcode"
CMAKE_OSX_DEPLOYMENT_TARGET: "10.15"
CMAKE_OSX_ARCHITECTURES: "x86_64;arm64"
- runner: "ubuntu-22.04"
compiler: "GCC-12"
platform: "ubuntu"
name: "Ubuntu 22.04"
CMAKE_GENERATOR: "Ninja Multi-Config"
- runner: "ubuntu-22.04"
compiler: "Clang-17"
platform: "ubuntu"
name: "Ubuntu 22.04 (Clang)"
CMAKE_GENERATOR: "Ninja Multi-Config"
exclude:
- runner: "windows-2022"
compiler: "GCC-12"
- runner: "windows-2022"
compiler: "Clang-17"
- runner: "windows-2022"
compiler: "AppleClang"
- runner: "macos-12"
compiler: "MSVC"
- runner: "macos-12"
compiler: "GCC-12"
- runner: "macos-12"
compiler: "Clang-17"
- runner: "ubuntu-22.04"
compiler: "MSVC"
- runner: "ubuntu-22.04"
compiler: "AppleClang"
name: "${{ matrix.name}} (${{ matrix.compiler}})"
runs-on: "${{ matrix.runner }}"
name: "${{ matrix.name }} (${{ matrix.compiler }})"
env:
CMAKE_BUILD_TYPE: "RelWithDebInfo"
CMAKE_GENERATOR: "${{ matrix.CMAKE_GENERATOR }}"
CMAKE_GENERATOR_PLATFORM: "${{ matrix.CMAKE_GENERATOR_PLATFORM }}"
CMAKE_GENERATOR_TOOLSET: "${{ matrix.CMAKE_GENERATOR_TOOLSET }}"
CMAKE_SYSTEM_VERSION: "${{ matrix.CMAKE_SYSTEM_VERSION }}"
CMAKE_OSX_DEPLOYMENT_TARGET: "${{ matrix.CMAKE_OSX_DEPLOYMENT_TARGET }}"
CMAKE_OSX_ARCHITECTURES: "${{ matrix.CMAKE_OSX_ARCHITECTURES }}"
steps:
- name: "Clone"
uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
- name: "Gather Information"
id: info
- name: "Install Compiler"
if: ${{ matrix.platform == 'linux' }}
shell: bash
run: |
# Define buildspec file
buildspec="${{ github.workspace }}/third-party/obs-studio/buildspec.json"
# Prebuilt Dependencies Version
IFS=$'\n' buildspecdata=($(node tools/buildspec.js "${buildspec}" "prebuilt" "macos-universal"))
echo "obs_deps_version=${buildspecdata[0]}" >> "$GITHUB_ENV"
echo "obs_deps_hash=${buildspecdata[1]}" >> "$GITHUB_ENV"
echo "obs_deps_url=${buildspecdata[2]}" >> "$GITHUB_ENV"
# Qt Version
IFS=$'\n' buildspecdata=($(node tools/buildspec.js "${buildspec}" "qt${{ matrix.qt }}" "macos-universal"))
echo "qt_version=${buildspecdata[0]}" >> "$GITHUB_ENV"
echo "qt_hash=${buildspecdata[1]}" >> "$GITHUB_ENV"
echo "qt_url=${buildspecdata[2]}" >> "$GITHUB_ENV"
# libOBS Version
echo "obs_version=$(cd "${{ github.workspace }}/third-party/obs-studio" && git describe --tags --long)" >> "$GITHUB_ENV"
- name: "Dependency: Qt (Cache)"
id: qt-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/qt"
key: "qt${{ env.qt_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: Qt"
id: qt
if: ${{ steps.qt-cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
curl --retry 5 --retry-delay 30 -jLo /tmp/qt.tar.xz "${{ env.qt_url }}"
if [[ ! -f "${{ github.workspace }}/build/qt" ]]; then mkdir -p "${{ github.workspace }}/build/qt"; fi
tar -xvf "/tmp/qt.tar.xz" -C "${{ github.workspace }}/build/qt"
- name: "Dependency: Prebuilt OBS Studio Dependencies (Cache)"
id: obsdeps-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/obsdeps"
key: "obsdeps${{ env.obs_deps_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: Prebuilt OBS Studio Dependencies"
id: obsdeps
if: ${{ steps.obsdeps-cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
curl --retry 5 --retry-delay 30 -jLo /tmp/obsdeps.tar.xz "${{ env.obs_deps_url }}"
if [[ ! -f "${{ github.workspace }}/build/obsdeps" ]]; then mkdir -p "${{ github.workspace }}/build/obsdeps"; fi
tar -xvf "/tmp/obsdeps.tar.xz" -C "${{ github.workspace }}/build/obsdeps"
- name: "Dependency: OBS Libraries (Cache)"
id: obs-cache
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/build/obs"
key: "obs${{ env.obs_version }}-${{ matrix.runner }}_${{ matrix.compiler }}--obsdeps${{ env.obs_deps_hash }}-qt${{ env.qt_hash }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: OBS Libraries"
id: obs
if: ${{ steps.obs-cache.outputs.cache-hit != 'true' }}
shell: bash
run: |
# Apply patches to obs-studio
pushd "${{ github.workspace }}/third-party/obs-studio" > /dev/null
for f in ../../patches/obs-studio/*.patch; do
echo "Applying patch '${f}''..."
[ -e "$f" ] || continue
git apply "$f"
done
popd > /dev/null
# Build obs-studio
cmake \
-S "${{ github.workspace }}/third-party/obs-studio" \
-B "${{ github.workspace }}/build/obs" \
-DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/build/obs/install" \
-DCMAKE_PREFIX_PATH="${{ github.workspace }}/build/obsdeps;${{ github.workspace }}/build/qt" \
-DENABLE_PLUGINS=OFF \
-DENABLE_UI=OFF \
-DENABLE_SCRIPTING=OFF
cmake \
--build "${{ github.workspace }}/build/obs" \
--config RelWithDebInfo \
--target obs-frontend-api
cmake \
--install "${{ github.workspace }}/build/obs" \
--config RelWithDebInfo \
--component obs_libraries
- name: "Configure"
continue-on-error: true
shell: bash
run: |
cmake \
-S "${{ github.workspace }}" \
-B "${{ github.workspace }}/build/ci" \
-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON \
-Dlibobs_DIR="${{ github.workspace }}/build/obs/install" \
-DQt${{ matrix.qt }}_DIR="${{ github.workspace }}/build/qt" \
-DFFmpeg_DIR="${{ github.workspace }}/build/obsdeps" \
-DCURL_DIR="${{ github.workspace }}/build/obsdeps"
- name: "Build: Debug"
continue-on-error: true
shell: bash
run: |
cmake --build "build/ci" --config Debug --target StreamFX
- name: "Build: Release"
shell: bash
env:
CMAKE_BUILD_TYPE: "RelWithDebInfo"
run: |
cmake --build "build/ci" --config RelWithDebInfo --target StreamFX
ubuntu:
strategy:
fail-fast: false
matrix:
runner: [ "ubuntu-22.04", "ubuntu-20.04" ]
compiler: [ "GCC-12", "GCC-11", "Clang-16" ]
qt: [ 5, 6 ]
CMAKE_GENERATOR: [ "Ninja Multi-Config" ]
exclude:
- runner: "ubuntu-22.04"
qt: 5
- runner: "ubuntu-22.04"
compiler: "GCC-11"
- runner: "ubuntu-20.04"
qt: 6
- runner: "ubuntu-20.04"
compiler: "GCC-12"
include:
- runner: "ubuntu-22.04"
name: "Ubuntu 22.04"
- runner: "ubuntu-20.04"
name: "Ubuntu 20.04"
runs-on: "${{ matrix.runner }}"
name: "${{ matrix.name }} (${{ matrix.compiler }}, Qt${{ matrix.qt }})"
env:
CMAKE_GENERATOR: "${{ matrix.CMAKE_GENERATOR }}"
CMAKE_GENERATOR_PLATFORM: "${{ matrix.CMAKE_GENERATOR_PLATFORM }}"
CMAKE_GENERATOR_TOOLSET: "${{ matrix.CMAKE_GENERATOR_TOOLSET }}"
CMAKE_SYSTEM_VERSION: "${{ matrix.CMAKE_SYSTEM_VERSION }}"
steps:
- name: "Clone"
uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
- name: "Install Build Tools"
shell: bash
run: |
echo "Installing essential tools..."
sudo apt-get -qq update
sudo apt-get install build-essential checkinstall pkg-config cmake ninja-build git
# Install the appropriate compiler
IFS=$'-' compiler=($(echo "${{ matrix.compiler }}")) # ToDo: Can this be done without invoking a sub-shell?
echo "Installing '${compiler[0]}' version ${compiler[1]}..."
@ -359,6 +93,7 @@ jobs:
echo "CMAKE_C_COMPILER=gcc-${compiler[1]}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=g++-${compiler[1]}" >> "$GITHUB_ENV"
echo "CMAKE_LINKER=gold" >> "$GITHUB_ENV"
elif [[ "${compiler[0]}" == "Clang" ]]; then
curl -jLo /tmp/llvm.sh "https://apt.llvm.org/llvm.sh"
chmod +x /tmp/llvm.sh
@ -373,101 +108,119 @@ jobs:
echo "CMAKE_C_COMPILER=clang-${compiler[1]}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=clang++-${compiler[1]}" >> "$GITHUB_ENV"
echo "CMAKE_LINKER=ld.lld-${compiler[1]}" >> "$GITHUB_ENV"
else
echo "Unknown Compiler"
exit 1
fi
- name: "Dependency: Qt"
id: qt
shell: bash
run: |
if [[ ${{ matrix.qt }} -eq 5 ]]; then
sudo apt-get -y install -V \
qtbase5-dev qtbase5-private-dev libqt5svg5-dev
elif [[ ${{ matrix.qt }} -eq 6 ]]; then
sudo apt-get -y install -V \
qt6-base-dev qt6-base-private-dev libqt6svg6-dev libgles2-mesa-dev libegl1-mesa-dev libgl1-mesa-dev
fi
- name: "Dependency: Prebuilt OBS Studio Dependencies"
id: obsdeps
shell: bash
run: |
sudo apt-get -y install -V \
libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavutil-dev libswresample-dev libswscale-dev \
libcurl4-openssl-dev
- name: "Dependency: OBS Libraries (Cache)"
id: obs-cache
uses: actions/cache@v3
- name: "Clone"
uses: "actions/checkout@v4"
with:
path: "${{ github.workspace }}/build/obs"
key: "obs${{ env.obs_version }}-${{ matrix.runner }}_${{ matrix.compiler }}--${{ matrix.runner }}-${{ matrix.compiler }}-${{ env.CACHE_VERSION }}"
- name: "Dependency: OBS Libraries"
id: obs
if: ${{ steps.obs-cache.outputs.cache-hit != 'true' }}
fetch-depth: 0
fetch-tags: true
- name: "Fetch"
shell: bash
run: |
# Apply patches to obs-studio
pushd "${{ github.workspace }}/third-party/obs-studio" > /dev/null
for f in ../../patches/obs-studio/*.patch; do
echo "Applying patch '${f}''..."
[ -e "$f" ] || continue
git apply "$f"
done
popd > /dev/null
# Extra requirements by libobs on Linux.
sudo apt-get install \
libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavutil-dev libswresample-dev libswscale-dev \
libx264-dev libcurl4-openssl-dev libmbedtls-dev libgl1-mesa-dev libjansson-dev libluajit-5.1-dev python3-dev \
libx11-dev libxcb-randr0-dev libxcb-shm0-dev libxcb-xinerama0-dev libxcomposite-dev libxinerama-dev \
libxcb1-dev libx11-xcb-dev libxcb-xfixes0-dev swig libcmocka-dev libxss-dev libglvnd-dev libgles2-mesa \
libgles2-mesa-dev libwayland-dev \
libasound2-dev libfdk-aac-dev libfontconfig-dev libfreetype6-dev libjack-jackd2-dev libpulse-dev \
libsndio-dev libspeexdsp-dev libudev-dev libv4l-dev libva-dev libvlc-dev libdrm-dev
# Build obs-studio
cmake \
-S "${{ github.workspace }}/third-party/obs-studio" \
-B "${{ github.workspace }}/build/obs" \
-G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE="Release" \
-DCMAKE_C_COMPILER="${{ env.CMAKE_C_COMPILER }}" \
-DCMAKE_CXX_COMPILER="${{ env.CMAKE_CXX_COMPILER }}" \
-DCMAKE_C_FLAGS="${{ env.CMAKE_C_FLAGS }}" \
-DCMAKE_CXX_FLAGS="${{ env.CMAKE_CXX_FLAGS }}" \
-DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/build/obs/install" \
-DCMAKE_PREFIX_PATH="${{ github.workspace }}/build/obsdeps;${{ github.workspace }}/build/qt" \
-DENABLE_PLUGINS=OFF \
-DENABLE_UI=OFF \
-DENABLE_SCRIPTING=OFF
cmake \
--build "${{ github.workspace }}/build/obs" \
--config Release \
--target obs-frontend-api
cmake \
--install "${{ github.workspace }}/build/obs" \
--config Release \
--component obs_libraries
- name: "Configure"
./tools/build.sh fetch
- name: "Gather"
shell: bash
run: |
echo "OBS_VERSION=$(cd third-party/obs-studio/ && git describe --tags --long --abbrev=8 HEAD)" >> "${GITHUB_ENV}"
- name: "Patch"
shell: bash
run: |
./tools/build.sh patch
- name: "Prerequisites"
shell: bash
run: |
./tools/build.sh prerequisites
- name: "libOBS: Restore Cache"
id: libobs
uses: actions/cache/restore@v3
with:
path: |
${{ github.workspace }}/third-party/obs-studio/.deps
${{ github.workspace }}/third-party/obs-studio/build/install
key: "${{ secrets.CACHE_VERSION }}-${{ env.CACHE_VERSION }}-${{ env.OBS_VERSION }}-${{ matrix.compiler }}-libobs"
- name: "libOBS"
if: ${{ (steps.libobs.outputs.cache-hit != 'true') }}
shell: bash
run: |
./tools/build.sh libobs
- name: "libOBS: Save Cache"
if: ${{ (steps.libobs.outputs.cache-hit != 'true') }}
uses: actions/cache/save@v3
with:
path: |
${{ github.workspace }}/third-party/obs-studio/.deps
${{ github.workspace }}/third-party/obs-studio/build/install
key: "${{ secrets.CACHE_VERSION }}-${{ env.CACHE_VERSION }}-${{ env.OBS_VERSION }}-${{ matrix.compiler }}-libobs"
- name: "Build: Restore Cache"
if: ${{ (steps.libobs.outputs.cache-hit == 'true') && (startsWith(github.ref, 'refs/heads/')) }}
uses: actions/cache/restore@v3
with:
path: |
${{ github.workspace }}/build
key: "${{ secrets.CACHE_VERSION }}-${{ env.CACHE_VERSION }}-${{ env.OBS_VERSION }}-${{ matrix.compiler }}-build"
- name: "Build"
shell: bash
run: |
./tools/build.sh configure \
--build "${{ github.workspace }}/build" \
--install "${{ github.workspace }}/build/install" \
--package "${{ github.workspace }}/build/package" \
--package-name "StreamFX ${{ matrix.name }} (for OBS Studio v${{ env.OBS_VERSION }}) v"
_r=$?; if [[ $_r != 0 ]]; then exit "$_r"; fi
./tools/build.sh build \
--build "${{ github.workspace }}/build"
_r=$?; if [[ $_r != 0 ]]; then exit "$_r"; fi
echo "STREAMFX_VERSION=$(LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 grep -hoPe "[0-9]+\.[0-9]+\.[0-9]+(|[abc][0-9]+)-g[a-fA-F0-9]+" "${{ github.workspace }}/build/generated/version.hpp")" >> "${GITHUB_ENV}"
- name: "Build: Save Cache"
if: ${{ startsWith(github.ref, 'refs/heads/') }}
uses: actions/cache/save@v3
with:
path: |
${{ github.workspace }}/build
key: "${{ secrets.CACHE_VERSION }}-${{ env.CACHE_VERSION }}-${{ env.OBS_VERSION }}-${{ matrix.compiler }}-build"
- name: "Install"
shell: bash
run: |
./tools/build.sh install \
--build "${{ github.workspace }}/build"
- name: "Packaging: Install InnoSetup"
if: startsWith( matrix.runner, 'windows' )
run: |
curl "-kL" "https://cdn.xaymar.com/ci/innosetup-6.2.1.exe" "-f" "--retry" "5" "-o" "inno.exe"
.\inno.exe /VERYSILENT /SP- /SUPPRESSMSGBOXES /NORESTART
- name: 'Packaging: Install Packages'
continue-on-error: true
if: startsWith( matrix.runner, 'macos' )
shell: bash
run: |
cmake \
-S "${{ github.workspace }}" \
-B "${{ github.workspace }}/build/ci" \
-DCMAKE_C_COMPILER="${{ env.CMAKE_C_COMPILER }}" \
-DCMAKE_CXX_COMPILER="${{ env.CMAKE_CXX_COMPILER }}" \
-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON \
-DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/build/ci/install" \
-DPACKAGE_NAME="streamfx-${{ env.PACKAGE_NAME }}" \
-DPACKAGE_PREFIX="${{ github.workspace }}/build/package" \
-Dlibobs_DIR="${{ github.workspace }}/build/obs/install"
- name: "Build: Debug"
continue-on-error: true
curl -kL https://cdn.xaymar.com/ci/Packages-1.2.10.dmg -f --retry 5 -o "Packages.dmg"
sudo hdiutil attach ./Packages.dmg
pushd /Volumes/Packages*
sudo installer -pkg ./Install\ Packages.pkg -target /
- name: "Packaging"
if: startsWith( matrix.runner, 'windows' )
shell: cmd
run: |
"C:\Program Files (x86)\Inno Setup 6\ISCC.exe" /V10 ".\build\installer.iss"
- name: "Packaging"
if: startsWith( matrix.runner, 'ubuntu' )
shell: bash
run: |
cmake --build "build/ci" --config Debug --target StreamFX
- name: "Build: Release"
mkdir "${{ github.workspace }}/build/package"
cmake --build "${{ github.workspace }}/build" --config RelWithDebInfo --target PACKAGE
- name: "Packaging"
if: startsWith( matrix.runner, 'macos' )
shell: bash
run: |
cmake --build "build/ci" --config RelWithDebInfo --target StreamFX
packagesbuild "${{ github.workspace }}/build/installer.pkgproj"
- name: "Artifacts"
uses: actions/upload-artifact@v4
with:
name: "StreamFX ${{ matrix.name }} (for OBS Studio v${{ env.OBS_VERSION }}) v${{ env.STREAMFX_VERSION }}"
path: "${{ github.workspace }}/build/package"
if-no-files-found: error
compression-level: 0

28
.gitmodules vendored
View File

@ -1,25 +1,51 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# Copyright (C) 2020-2024 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
[submodule "cmake/clang"]
path = cmake/clang
url = https://github.com/Xaymar/cmake-clang.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "cmake/version"]
path = cmake/version
url = https://github.com/Xaymar/cmake-version.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "third-party/nlohmann-json"]
path = third-party/nlohmann-json
url = https://github.com/nlohmann/json.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "third-party/msvc-redist-helper"]
path = third-party/msvc-redist-helper
url = https://github.com/Xaymar/msvc-redist-helper.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "third-party/nvidia-maxine-ar-sdk"]
path = third-party/nvidia-maxine-ar-sdk
url = https://github.com/NVIDIA/MAXINE-AR-SDK.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "third-party/nvidia-maxine-vfx-sdk"]
path = third-party/nvidia-maxine-vfx-sdk
url = https://github.com/NVIDIA/MAXINE-VFX-SDK.git
fetchRecurseSubmodules = true
ignore = all
shallow = true
[submodule "third-party/obs-studio"]
path = third-party/obs-studio
url = https://github.com/obsproject/obs-studio.git
fetchRecurseSubmodules = true
ignore = all
[submodule "third-party/nvidia-maxine-afx-sdk"]
path = third-party/nvidia-maxine-afx-sdk
url = https://github.com/NVIDIA/MAXINE-AFX-SDK.git
fetchRecurseSubmodules = true
ignore = all
shallow = true

View File

@ -3,5 +3,6 @@ Michael Fabian 'Xaymar' Dirks <info@xaymar.com> <github@xaymar.com>
Vainock <39059951+Vainock@users.noreply.github.com> <contact.vainock@gmail.com>
Charles Fettinger <charles@oncorporation.com> <charles@onacloud.org>
Charles Fettinger <charles@oncorporation.com> <charles@Oncorporation.com>
Radegast Stravinsky <radegast.ffxiv@gmail.com> <radegast.ffxiv@gmail.com>
Radegast Stravinsky <radegast.ffxiv@gmail.com> <58457062+Radegast-FFXIV@users.noreply.github.com>
Carsten Braun <info@braun-cloud.de> <info@braun-software-solutions.de>

176
BUILDING.md Normal file
View File

@ -0,0 +1,176 @@
# Building
This document intends to guide you through the process of building StreamFX. It requires understanding of the tools used, and may require you to learn tools yourself before you can advance further in the guide. It is intended to be used by developers and contributors.
## Required Pre-Requisites / Dependencies
- [Git](https://git-scm.com/)
- **Debian / Ubuntu**
`sudo apt install git`
- [CMake](https://cmake.org/) 3.20 (or newer)
- **Debian / Ubuntu**
`sudo apt install cmake`
- A compatible Compiler:
- **Windows**
[Visual Studio](https://visualstudio.microsoft.com/vs/) 2022 or newer
- **MacOS**
Xcode 11.x (or newer) for x86_64
Xcode 12.x (or newer) for arm64
- **Debian / Ubuntu**
- Essential Build Tools:
`sudo apt install build-essential pkg-config checkinstall make ninja-build`
- One of:
- GCC 12 (or newer)
`sudo apt install gcc-12 g++-12`
- [LLVM](https://releases.llvm.org/) Clang 14 (or newer)
`sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"`
- One of:
- ld or gold
`sudo apt install binutils`
- [LLVM](https://releases.llvm.org/) lld
`sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"`
- [mold](https://github.com/rui314/mold)
`sudo apt install mold`
- [Homebrew](https://brew.sh/) (Required, for **MacOS** only)
## Building Bundled
The main method to build StreamFX is to first set up an OBS Studio copy and then integrate the StreamFX repository into it. It is recommended to first [Uninstall](Uninstallation) any currently installed versions of StreamFX to prevent conflicts, as OBS Studio may still attempt to load installed versions of StreamFX in addition to the one in the bundled build.
1. Clone StreamFX recursively with submodules into a directory of your choice.
`git clone --recurse-submodules 'https://github.com/Xaymar/obs-StreamFX.git' .`
2. Navigate to the `third-party/obs-studio/UI/frontend-plugins` directory.
3. Add a symbolic link back to the StreamFX source code here.
- **Windows (Powershell)**
`New-Item -Path streamfx -ItemType SymbolicLink -Value ..\..\..\..\`
- **Windows (Batch)**
`mklink /J streamfx ..\..\..\..\`
- **Debian / Ubuntu**
`ln -s ../../../../ streamfx`
4. Open `CMakeLists.txt` in the same directory and append `add_subdirectory(streamfx)` to the end.
5. Navigate back to `third-party/obs-studio` and follow the [OBS Studio build guide](https://obsproject.com/wiki/install-instructions). A short form of it is below.
1. Check available CMake presets by running:
`cmake --list-presets`
2. Configure for one of the available presets with the command:
- **Windows**
`cmake --preset windows-x64`
- **MacOS**
`cmake --preset macos`
- **Debian / Ubuntu (x86)**
`cmake --preset linux-x86_64`
- **Debian / Ubuntu (ARM)**
`cmake --preset linux-aarch`
3. Open the generated IDE file of your choice and start coding.
6. Done. StreamFX is now part of the build.
## Building Standalone
This method is primarily designed for Continuous Integration and is only used there, and as such requires a significantly more in depth experience with all used tools and projects. You are entirely on your own if you are so daring to choose this method. Here be dragons and stuff.
### Install Prerequisites / Dependencies
- [Qt](https://www.qt.io/) 6:
- **Windows**
Handled by libobs.
- **MacOS**
Handled by libobs and the build script.
- **Debian / Ubuntu:**
`sudo apt install qt6-base-dev qt6-base-private-dev qt6-image-formats-plugins qt6-wayland libqt6svg6-dev libglx-dev libgl1-mesa-dev`
- [CURL](https://curl.se/):
- **Windows**
Handled by libobs.
- **MacOS**
Handled by libobs.
- **Debian / Ubuntu:**
`sudo apt install curl libcurl4-openssl-dev`
- [FFmpeg](https://ffmpeg.org/) (Optional, for FFmpeg component only):
- **Windows**
Handled by libobs.
- **MacOS**
Handled by libobs.
- **Debian / Ubuntu**
`sudo apt install libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavutil-dev libswresample-dev libswscale-dev`
- [LLVM](https://releases.llvm.org/) (Optional, for clang-format and clang-tidy integration only):
- **Windows**
Install using the Windows installer.
- **MacOS**
Install using the MacOS installer, though usually not needed.
- **Debian / Ubuntu**
`sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" all`
- [InnoSetup](https://jrsoftware.org/isinfo.php) (Optional, for **Windows** installer only)
### Steps
1. Open a `git` capable bash shell in your projects directory. (On Windows, git bash is enough).
2. Clone the project:
`git clone https://github.com/Xaymar/obs-StreamFX.git streamfx`
3. Install some required prerequisites:
`./tools/build.sh prerequisites`
4. Update submodules:
`./tools/build.sh fetch`
5. Apply patches:
`./tools/build.sh patch`
6. Build libOBS:
`./tools/build.sh libobs`
7. Build StreamFX:
`./tools/build.sh build`
8. Done.
It is still possible to build using cmake-gui; however, it is not recommended anymore.
## CMake Options
The project is intended to be versatile and configurable, so we offer almost everything to be configured on a silver platter directly in CMake (if possible). If StreamFX detects that it is being built together with other projects, it will automatically prefix all options with `StreamFX_` to prevent collisions.
### Generic
- `GIT` (not prefixed)
Path to the `git` binary on your system, for use with features that require git during configuration and generation.
- `VERSION`
Set or override the version of the project with a custom one. Allowed formats are: SemVer 2.0.0, CMake.
### Code
- `ENABLE_CLANG`
Enable integration of `clang-format` and `clang-tidy`
- `CLANG_PATH` (not prefixed, only with `ENABLE_CLANG`)
Path to the `clang` installation containing `clang-format` and `clang-tidy`. Only used as a hint.
- `CLANG_FORMAT_PATH` and `CLANG_TIDY_PATH` (not prefixed)
Path to `clang-format` and `clang-tidy` that will be used.
### Dependencies
- `LibObs_DIR`
Path to the obs-studio libraries.
- `Qt5_DIR`, `Qt6_DIR` or `Qt_DIR` (autodetect)
Path to Qt5 (OBS Studio 27.x and lower) or Qt6 (OBS Studio 28.x and higher).
- `FFmpeg_DIR`
Path to compatible FFmpeg libraries and headers.
- `CURL_DIR`
Path to compatible CURL libraries and headers.
- `AOM_DIR`
Path to compatible AOM libraries and headers.
### Compiling
- `ENABLE_FASTMATH`
Enable fast math optimizations if the compiler supports them. This trades precision for performance, and is usually good enough anyway.
- `ENABLE_LTO`
Enable link time optimization for faster binaries in exchange for longer build times.
- `ENABLE_PROFILING`
Enable CPU and GPU profiling code, this option reduces performance drastically.
- `TARGET_*`
Specify which architecture target the generated binaries will use.
### Components
- `COMPONENT_<NAME>`
Enable the component by the given name.
### Installing & Packaging
These options are only available in CI-Style mode.
- `CMAKE_INSTALL_PREFIX`
The path in which installed content should be placed when building the `install` target.
- `STRUCTURE_PACKAGEMANAGER`
If enabled will install files in a layout compatible with package managers.
- `STRUCTURE_UNIFIED`
Enable to install files in a layout compatible with an OBS Studio plugin manager.
- `PACKAGE_NAME`
The name of the packaged archive, excluding the prefix, suffix and extension.
- `PACKAGE_PREFIX`
The path in which the packages should be placed.
- `PACKAGE_SUFFIX`
The suffix to attach to the name, before the file extension. If left blank will attach the current version string to the package.
- `STRUCTURE_UNIFIED`
Enable to replace the PACKAGE_ZIP target with a target that generates a single `.obs` file instead.
</details>

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +1,18 @@
# Contributing
This document goes over how you (and/or your organization) are expected to contribute. These guidelines are softly enforced and sometimes not required.
This document intends to teach you the proper way to contribute to the project as a set of guidelines. While they aren't always enforced, the chances of your code being accepted are significantly higher when you follow these. For smaller changes, we might opt to squash your changes to apply the guidelines below to your contribution.
## Localization
We use Crowdin to handle translations into many languages, and you can join the [StreamFX project on Crowdin](https://crowdin.com/project/obs-stream-effects) if you are interested in improving the translations to your native tongue. As Crowdin handles all other languages, Pull Requests therefore should only include changes to `en-US.ini`.
<details open><summary><h2 style="display: inline-block;">Repository & Commits</h2></summary>
## Commit Guidelines
Commits should focus on a single change such as formatting, fixing a bug, a warning across the code, and similar things. This means that you should not include a fix to color format handling in a commit that implements a new encoder, or include a fix to a bug with a fix to a warning.
As this is a rather large project, we have certain rules to follow when contributing via git.
### Linear History
This project prefers the linear history of `git rebase` and forbids merge commits. This allows all branches to be a single line back to the root, unless viewed as a whole where it becomes a tree. If you are working on a branch for a feature, bug or other thing, you should know how to rebase back onto the main branch before making a pull request.
We follow the paradigm of linear history which forbids branches from being merged, thus changes made on branches are `git rebase`d back onto the root. This simplifies the code history significantly, but makes reverting changes more difficult.
### Commit Message & Title
We require a commit message format like this:
`git merge`
`git rebase`
### Commits
A commit should be containing a single change, even if it spans multiple units, and has the following format:
```
prefix: short description
@ -19,35 +20,60 @@ prefix: short description
optional long description
```
The `short description` should be no longer than 80 characters, excluding the `prefix: ` part. The `optional long description` should be present if the change is not immediately obvious - however it does not replace proper documentation.
The short description should be no longer than 120 characters and focus on the important things. The long description is optional, but should be included for larger changes.
#### The correct `prefix`
Depending on where the file is that you ended up modifying, or if you modified multiple files at once, the prefix changes. Take a look at the list to understand which directories cause which prefix:
#### The appropriate `prefix`
- `/CMakeLists.txt`, `/cmake` -> `cmake`
- `/.github/workflows` -> `ci`
- `/data/locale`, `/crowdin.yml` -> `locale`
- `/data/examples` -> `examples`
- `/data` -> `data` (if not part of another prefix)
- `/media` -> `media`
- `/source`, `/include` -> `code`
- `/templates` -> `templates` (or merge with `cmake`)
- `/third-party` -> `third-party`
- `/patches` -> `patches`
- `/tools` -> `tools`
- `/ui` -> `ui` (if not part of a `code` change)
- Most other files -> `project`
<table>
<tr>
<th>Path(s)</th>
<th>Prefix</th>
<th>Example</th>
</tr>
<tr>
<td>
data/locale
</td>
<td>locale</td>
<td>
<code>data/locale/en-US.ini</code> -> <code>locale</code>
</td>
</tr>
<tr>
<td>components/name</td>
<td>name</td>
<td>
<code>components/shader</code> -> <code>shader</code>
</td>
</tr>
<tr>
<td>
source<br>
templates<br>
data<br>
ui
</td>
<td>core</td>
<td>
<code>ui/main.ui</code> -> <code>core</code>
</td>
</tr>
<tr>
<td>Anything else</td>
<td><b>Omit the prefix</b></td>
<td></td>
</tr>
</table>
If multiple locations match, they should be alphabetically sorted and separated by `, `. A change to both `ui` and `code` will as such result in a prefix of `code, ui`. If a `code` change only affects a single file, or multiple files with a common parent file, the prefix should be the path of the file, like shown in the following examples:
If multiple match, apply the prefix that changes the most files. If all are equal, alphabetically sort the prefixes and list comma separated.
- `/source/encoders/encoder-ffmpeg` -> `encoder/ffmpeg`
- `/source/filters/filter-shader` -> `filter/shader`
- `/source/encoders/handlers/handler`, `/source/encoders/encoder-ffmpeg` -> `encoder/ffmpeg`
</details>
## Coding Guidelines
<details open><summary><h2 style="display: inline-block;">Coding</h2></summary>
### Documentation
Documentation should be present in areas where it would save time to new developers, and in areas where an API is defined. This means that you should not provide documentation for things like `1 + 1`, but for things like the following:
The short form of this part is **Code != Documentation**. Documentation is what you intend your Code to do, while Code is what it actually does. If your Code mismatches the Documentation, it is time to fix the Code, unless the change is a new addition in terms of behavior or functionality. Note that by this we don't mean to document things like `1 + 1` but instead things like the following:
```c++
int32_t idepth = static_cast<int32_t>(depth);
@ -58,14 +84,18 @@ int32_t container_size = static_cast<int32_t>(pow(2l, (idepth + (idepth / 2))));
```c++
class magic_class {
void do_magic_thing(float magic_number);
void do_magic_thing(float magic_number) {
// Lots and lots of SIMD code that does a magic thing...
}
}
```
Both of these examples would be much easier to understand if they had proper documentation, and save hours if not even days of delving into code. Documentation is about saving time to new developers, and can't be replaced by code. Code is not Documentation!
Documenting what a block of Code does not only helps you, it also helps other contributors understand what this Code is supposed to do. While you may be able to read your own Code (at least for now), there is no guarantee that either you or someone else will be able to read it in the future. Not only that, but it makes spotting mistakes and fixing them easier, since we have Documentation to tell us what it is supposed to do!
### Naming & Casing
All long-term objects should have a descriptive name, which can be used by other developers to know what it is for. Temporary objects should also have some information, but do not necessarily follow the same rules.
The project isn't too strict about variable naming as well as casing, but we do prefer a universal style across all code. While this may appear as removing your individuality from the code, it ultimately serves the purpose of making it easier to jump from one block of code to the other, without having to guess at what this code now does.
Additionally we prefer it when things are named by what they either do or what they contain, instead of having the entire alphabet spelled out in different arrangements. While it is fine to have chaos in your own Code for your private or hobby projects, it is not fine to submit such code to other projects.
#### Macros
- Casing: ELEPHANT_CASE
@ -249,6 +279,16 @@ Special rules for `class`
#### Members
All class members must be `private` and only accessible through get-/setters. The setter of a member should also validate if the setting is within an allowed range, and throw exceptions if an error occurs. If there is no better option, it is allowed to delay validation until a common function is called.
## Building
Please read [the guide on the wiki](https://github.com/Xaymar/obs-StreamFX/wiki/Building) for building the project.
</details>
<details open><summary><h2 style="display: inline-block;">Localization</h2></summary>
We use Crowdin to handle translations into many languages, and you can join the [StreamFX project on Crowdin](https://crowdin.com/project/obs-stream-effects) if you are interested in improving the translations to your native tongue. As Crowdin handles all other languages, Pull Requests therefore should only include changes to `en-US.ini`.
</details>
## Further Resources
- A guide on how to build the project is in BUILDING.MD.
- A no bullshit guide to `git`: https://rogerdudler.github.io/git-guide/
- Remember, `git` has help pages for all commands - run `git <command> --help`.
- ... or use visual clients, like TortoiseGit, Github Desktop, SourceTree, and similar. It's what I do.

57
README.adoc Normal file
View File

@ -0,0 +1,57 @@
== image:https://raw.githubusercontent.com/Xaymar/obs-StreamFX/master/media/logo.png[alt="StreamFX"]
Upgrade your setup with several modern sources, filters, transitions and encoders using StreamFX! With several performant and flexible features, you will discover new ways to build your scenes, better ways to encode your content, and take your stream to the next level. Create cool new scenes with 3D effects, add glow or shadow, or blur out content - endless choices, and all of it at your fingertips.
++++
<p style="text-align: center; font-weight: bold; font-size: 1.5em;">
<a href="https://github.com/Xaymar/obs-StreamFX/wiki">More Information</a><br/>
<a href="https://github.com/Xaymar/obs-StreamFX/actions"><img src="https://github.com/Xaymar/obs-StreamFX/actions/workflows/main.yml/badge.svg" alt="CI Status" /></a>
<a href="https://crowdin.com/project/obs-stream-effects"><img src="https://badges.crowdin.net/obs-stream-effects/localized.svg" alt="Crowdin Status" /></a>
</p>
++++
=== Support the development of StreamFX!
++++
<a href="https://patreon.com/join/xaymar" target="_blank">
<img height="70px" alt="Patreon" style="height: 70px; float:right;" align="right" src="https://user-images.githubusercontent.com/437395/106462708-bd602980-6496-11eb-8f35-038577cf8fd7.jpg"/>
</a>
++++
Maintaining a project like StreamFX requires time and money, of which both are in short supply. If you use any feature of StreamFX, please consider supporting StreamFX via link:https://patreon.com/xaymar[Patreon]. Even as little as 1€ per month matters a lot, plus you get a number of benefits!
=== License
Licensed under link:https://github.com/Xaymar/obs-StreamFX/blob/root/LICENSE[GPLv3 (or later), see LICENSE]. Additional works included are:
[options="header"]
|=================
|Work |License |Author(s)
|link:https://gen.glad.sh/[GLAD]
|link:https://github.com/Dav1dde/glad/blob/glad2/LICENSE[MIT License]
|link:https://github.com/Dav1dde/glad/graphs/contributors?type=a[Dav1dde, madebr, BtbN, and more]
|link:https://github.com/nlohmann/json[JSON for Modern C++]
|link:https://github.com/nlohmann/json/blob/develop/LICENSE.MIT[MIT License]
|link:https://github.com/nlohmann/json/graphs/contributors?type=a[nlohmann, ChrisKtiching, nickaein, and more]
|link:https://github.com/NVIDIA/MAXINE-AFX-SDK[NVIDIA Maxine Audio Effects SDK]
|link:https://github.com/NVIDIA/MAXINE-AFX-SDK/blob/master/LICENSE[MIT License]
|link:https://nvidia.com/[NVIDIA Corporation]
|link:https://github.com/NVIDIA/MAXINE-AR-SDK[NVIDIA Maxine Augmented Reality SDK]
|link:https://github.com/NVIDIA/MAXINE-Ar-SDK/blob/master/LICENSE[MIT License]
|link:https://nvidia.com/[NVIDIA Corporation]
|link:https://github.com/NVIDIA/MAXINE-VFX-SDK[NVIDIA Maxine Video Effects SDK]
|link:https://github.com/NVIDIA/MAXINE-VFX-SDK/blob/master/LICENSE[MIT License]
|link:https://nvidia.com/[NVIDIA Corporation]
|link:https://github.com/obsproject/obs-studio[Open Broadcaster Software Studio]
|link:https://github.com/obsproject/obs-studio/blob/master/COPYING[GPL-2.0 (or later)]
|link:https://github.com/obsproject/obs-studio/graphs/contributors?type=a[jp9000, computerquip, and more]
|link:https://www.qt.io/[Qt 6.x]
|link:https://www.qt.io/download-open-source[(L)GPL-3.0 (or later)]
|link:https://www.qt.io/[The Qt Company], and open source contributors
|=================

View File

@ -1,157 +1,293 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# Copyright (C) 2020-2024 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
#
# This module defines the following variables:
#
# FFMPEG_FOUND - All required components and the core library were found
# FFMPEG_INCLUDE_DIRS - Combined list of all components include dirs
# FFMPEG_LIBRARIES - Combined list of all components libraries
# FFMPEG_VERSION_STRING - Version of the first component requested
# FFmpeg_FOUND - All required components and the core library were found
# FFmpeg_INCLUDE_DIRS - Combined list of all components include dirs
# FFmpeg_LIBRARIES - Combined list of all components libraries
# FFmpeg_VERSION_STRING - Version of the first component requested
#
# For each requested component the following variables are defined:
#
# FFMPEG_<component>_FOUND - The component was found
# FFMPEG_<component>_INCLUDE_DIRS - The components include dirs
# FFMPEG_<component>_LIBRARIES - The components libraries
# FFMPEG_<component>_VERSION_STRING - The components version string
# FFMPEG_<component>_VERSION_MAJOR - The components major version
# FFMPEG_<component>_VERSION_MINOR - The components minor version
# FFMPEG_<component>_VERSION_MICRO - The components micro version
# FFmpeg_<component>_FOUND - The component was found
# FFmpeg_<component>_INCLUDE_DIRS - The components include dirs
# FFmpeg_<component>_LIBRARIES - The components libraries
# FFmpeg_<component>_VERSION_STRING - The components version string
# FFmpeg_<component>_VERSION_MAJOR - The components major version
# FFmpeg_<component>_VERSION_MINOR - The components minor version
# FFmpeg_<component>_VERSION_MICRO - The components micro version
#
# <component> is the uppercase name of the component
cmake_minimum_required(VERSION 3.26.0...3.28.1)
include(FindPackageHandleStandardArgs)
include(CMakeParseArguments)
find_package(PkgConfig QUIET)
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_lib_suffix 64)
else()
set(_lib_suffix 32)
endif()
function(find_ffmpeg_component)
cmake_parse_arguments(
_ARGS "" "COMPONENT;OUTPUT" "" ${ARGN}
)
function(find_ffmpeg_library component header)
string(TOUPPER "${component}" component_u)
set(FFMPEG_${component_u}_FOUND FALSE PARENT_SCOPE)
set(FFmpeg_${component}_FOUND FALSE PARENT_SCOPE)
string(TOLOWER "${_ARGS_COMPONENT}" lcomponent)
# Do nothing if the target already exists.
if(TARGET FFmpeg::${lcomponent})
set(FFmpeg_${lcomponent}_FOUND TRUE PARENT_SCOPE)
return()
endif()
set(FFmpeg_${lcomponent}_FOUND OFF PARENT_SCOPE)
# pkg-config
if(PKG_CONFIG_FOUND)
pkg_check_modules(PC_FFMPEG_${component} QUIET lib${component})
pkg_check_modules(PC_FFmpeg_${lcomponent} QUIET lib${lcomponent})
endif()
find_path(FFMPEG_${component}_INCLUDE_DIR
# Find headers
find_path(FFmpeg_${lcomponent}_INCLUDE_DIR
NAMES
"lib${component}/${header}" "lib${component}/version.h"
"${lcomponent}.h"
"lib${lcomponent}.h"
HINTS
ENV FFmpegPath${_lib_suffix}
ENV FFmpegPath
ENV DepsPath${_lib_suffix}
ENV DepsPath
${FFmpegPath${_lib_suffix}}
${FFmpegPath}
${DepsPath${_lib_suffix}}
${DepsPath}
${PC_FFMPEG_${component}_INCLUDE_DIRS}
PATHS
/usr/include /usr/local/include /opt/local/include /sw/include
PATH_SUFFIXES ffmpeg libav include)
find_library(FFMPEG_${component}_LIBRARY
NAMES
"${component}" "lib${component}"
HINTS
ENV FFmpegPath${_lib_suffix}
ENV FFmpegPath
ENV DepsPath${_lib_suffix}
ENV DepsPath
${FFmpegPath${_lib_suffix}}
${FFmpegPath}
${DepsPath${_lib_suffix}}
${DepsPath}
${PC_FFMPEG_${component}_LIBRARY_DIRS}
PATHS
/usr/lib /usr/local/lib /opt/local/lib /sw/lib
${PC_FFmpeg_${lcomponent}_INCLUDE_DIRS}
${FFmpeg_DIR}
PATH_SUFFIXES
lib${_lib_suffix} lib
libs${_lib_suffix} libs
bin${_lib_suffix} bin
../lib${_lib_suffix} ../lib
../libs${_lib_suffix} ../libs
../bin${_lib_suffix} ../bin)
"include/lib${lcomponent}"
"lib${lcomponent}"
set(FFMPEG_${component_u}_INCLUDE_DIRS ${FFMPEG_${component}_INCLUDE_DIR} PARENT_SCOPE)
set(FFMPEG_${component_u}_LIBRARIES ${FFMPEG_${component}_LIBRARY} PARENT_SCOPE)
"include/${lcomponent}"
"${lcomponent}"
mark_as_advanced(FFMPEG_${component}_INCLUDE_DIR FFMPEG_${component}_LIBRARY)
"include"
DOC "${lcomponent}: Path to include directory"
)
mark_as_advanced(FFmpeg_${lcomponent}_INCLUDE_DIR)
if(FFMPEG_${component}_INCLUDE_DIR AND FFMPEG_${component}_LIBRARY)
set(FFMPEG_${component_u}_FOUND TRUE PARENT_SCOPE)
set(FFmpeg_${component}_FOUND TRUE PARENT_SCOPE)
# Find library
math(EXPR LIBSUFFIX "8*${CMAKE_SIZEOF_VOID_P}")
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(FIND_LIBRARY_USE_LIB64_PATHS ON)
set(FIND_LIBRARY_USE_LIB32_PATHS OFF)
set(FIND_LIBRARY_USE_LIBX32_PATHS OFF)
else()
set(FIND_LIBRARY_USE_LIB64_PATHS OFF)
set(FIND_LIBRARY_USE_LIB32_PATHS ON)
set(FIND_LIBRARY_USE_LIBX32_PATHS OFF)
endif()
find_library(FFmpeg_${lcomponent}_LIBRARY
NAMES
"${lcomponent}"
"lib${lcomponent}"
HINTS
${PC_FFmpeg_${lcomponent}_LIBRARY_DIRS}
${FFmpeg_DIR}
PATHS
"/lib${LIBSUFFIX}"
"/lib"
list(APPEND FFMPEG_INCLUDE_DIRS ${FFMPEG_${component}_INCLUDE_DIR})
list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
set(FFMPEG_INCLUDE_DIRS "${FFMPEG_INCLUDE_DIRS}" PARENT_SCOPE)
"/usr/lib${LIBSUFFIX}"
"/usr/lib"
list(APPEND FFMPEG_LIBRARIES ${FFMPEG_${component}_LIBRARY})
list(REMOVE_DUPLICATES FFMPEG_LIBRARIES)
set(FFMPEG_LIBRARIES "${FFMPEG_LIBRARIES}" PARENT_SCOPE)
"/usr/local/lib${LIBSUFFIX}"
"/usr/local/lib"
PATH_SUFFIXES
"lib${LIBSUFFIX}/lib${lcomponent}"
"lib/lib${lcomponent}"
"lib${lcomponent}"
set(FFMPEG_${component_u}_VERSION_STRING "unknown" PARENT_SCOPE)
set(_vfile "${FFMPEG_${component}_INCLUDE_DIR}/lib${component}/version.h")
"lib${LIBSUFFIX}/${lcomponent}"
"lib/${lcomponent}"
"${lcomponent}"
DOC "${lcomponent}: Path to library file"
)
mark_as_advanced(FFmpeg_${lcomponent}_LIBRARY)
if(EXISTS "${_vfile}")
file(STRINGS "${_vfile}" _version_parse REGEX "^.*VERSION_(MAJOR|MINOR|MICRO)[ \t]+[0-9]+[ \t]*$")
string(REGEX REPLACE ".*VERSION_MAJOR[ \t]+([0-9]+).*" "\\1" _major "${_version_parse}")
string(REGEX REPLACE ".*VERSION_MINOR[ \t]+([0-9]+).*" "\\1" _minor "${_version_parse}")
string(REGEX REPLACE ".*VERSION_MICRO[ \t]+([0-9]+).*" "\\1" _micro "${_version_parse}")
if((FFmpeg_${lcomponent}_LIBRARY) AND (FFmpeg_${lcomponent}_INCLUDE_DIR))
# The include path should be the parent directory.
cmake_path(GET FFmpeg_${lcomponent}_INCLUDE_DIR PARENT_PATH _INCLUDE_DIR)
set(FFMPEG_${component_u}_VERSION_MAJOR "${_major}" PARENT_SCOPE)
set(FFMPEG_${component_u}_VERSION_MINOR "${_minor}" PARENT_SCOPE)
set(FFMPEG_${component_u}_VERSION_MICRO "${_micro}" PARENT_SCOPE)
set(FFMPEG_${component_u}_VERSION_STRING "${_major}.${_minor}.${_micro}" PARENT_SCOPE)
# Detect the library version.
set(_VERSION "")
if((EXISTS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version_major.h") AND (EXISTS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version.h"))
# Parse version_major.h:
# - #define LIB${lcomponent}_VERSION_MAJOR 58
# and version.h:
# - #define LIB${lcomponent}_VERSION_MINOR 3
# - #define LIB${lcomponent}_VERSION_MICRO 100
file(STRINGS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version_major.h" _FILE_VERSION REGEX "^.*_VERSION_(MAJOR|MINOR|MICRO)[ \t]+[1-9]+[0-9]*.*$")
file(STRINGS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version.h" _FILE_VERSION_MINOR REGEX "^.*_VERSION_(MAJOR|MINOR|MICRO)[ \t]+[1-9]+[0-9]*.*$")
list(APPEND _FILE_VERSION ${_FILE_VERSION_MINOR})
elseif((EXISTS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version.h"))
# Parse version.h:
# - #define LIB${lcomponent}_VERSION_MAJOR 58
# - #define LIB${lcomponent}_VERSION_MINOR 3
# - #define LIB${lcomponent}_VERSION_MICRO 100
file(STRINGS "${FFmpeg_${lcomponent}_INCLUDE_DIR}/version.h" _FILE_VERSION REGEX "^.*_VERSION_(MAJOR|MINOR|MICRO)[ \t]+[1-9]+[0-9]*.*$")
else()
message(STATUS "Failed parsing FFmpeg ${component} version")
message(WARNING "${lcomponent}: No version header found, defaulting to 0.0.0.")
set(FFmpeg_${lcomponent}_VERSION_MAJOR "0")
set(FFmpeg_${lcomponent}_VERSION_MINOR "0")
set(FFmpeg_${lcomponent}_VERSION_PATCH "0")
endif()
foreach(_FILE_VERSION_PART IN LISTS _FILE_VERSION)
if(_FILE_VERSION_PART MATCHES "^.*MAJOR[ \t]+([1-9]+[0-9]*).*$")
set(FFmpeg_${lcomponent}_VERSION_MAJOR ${CMAKE_MATCH_1})
elseif(_FILE_VERSION_PART MATCHES "^.*MINOR[ \t]+([1-9]+[0-9]*).*$")
set(FFmpeg_${lcomponent}_VERSION_MINOR ${CMAKE_MATCH_1})
elseif(_FILE_VERSION_PART MATCHES "^.*MICRO[ \t]+([1-9]+[0-9]*).*$")
set(FFmpeg_${lcomponent}_VERSION_PATCH ${CMAKE_MATCH_1})
endif()
endforeach()
set(FFmpeg_${lcomponent}_VERSION "${FFmpeg_${lcomponent}_VERSION_MAJOR}.${FFmpeg_${lcomponent}_VERSION_MINOR}.${FFmpeg_${lcomponent}_VERSION_PATCH}")
set(FFmpeg_${lcomponent}_VERSION "${FFmpeg_${lcomponent}_VERSION}" PARENT_SCOPE)
# If possible (shared/module library), find the binary file for it.
find_file(FFmpeg_${lcomponent}_BINARY
NAMES
"${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}.${FFmpeg_${lcomponent}_VERSION_MAJOR}.${FFmpeg_${lcomponent}_VERSION_MINOR}"
"lib${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}.${FFmpeg_${lcomponent}_VERSION_MAJOR}.${FFmpeg_${lcomponent}_VERSION_MINOR}"
"${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}.${FFmpeg_${lcomponent}_VERSION_MAJOR}"
"lib${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}.${FFmpeg_${lcomponent}_VERSION_MAJOR}"
"${lcomponent}-${FFmpeg_${lcomponent}_VERSION_MAJOR}${CMAKE_SHARED_LIBRARY_SUFFIX}"
"lib${lcomponent}-${FFmpeg_${lcomponent}_VERSION_MAJOR}${CMAKE_SHARED_LIBRARY_SUFFIX}"
"${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}"
"lib${lcomponent}${CMAKE_SHARED_LIBRARY_SUFFIX}"
HINTS
${PC_FFmpeg_${lcomponent}_LIBRARY_DIRS}
${FFmpeg_DIR}
PATHS
"/bin${LIBSUFFIX}"
"/bin"
"/usr/bin${LIBSUFFIX}"
"/usr/bin"
"/usr/local/bin${LIBSUFFIX}"
"/usr/local/bin"
"/lib${LIBSUFFIX}"
"/lib"
"/usr/lib${LIBSUFFIX}"
"/usr/lib"
"/usr/local/lib${LIBSUFFIX}"
"/usr/local/lib"
PATH_SUFFIXES
"bin${LIBSUFFIX}/lib${lcomponent}"
"bin/lib${lcomponent}"
"lib${lcomponent}"
"bin${LIBSUFFIX}/${lcomponent}"
"bin/${lcomponent}"
"${lcomponent}"
"bin"
DOC "${lcomponent}: Path to binary file (optional)"
)
if(FFmpeg_${lcomponent}_BINARY)
set(_TARGET_TYPE SHARED)
else()
set(_TARGET_TYPE UNKNOWN)
endif()
# ToDo: Detect if static or shared. Usually should be shared, due to FFmpeg's size.
add_library(FFmpeg::${lcomponent} ${_TARGET_TYPE} IMPORTED)
target_include_directories(FFmpeg::${lcomponent}
INTERFACE "${_INCLUDE_DIR}"
)
set_target_properties(FFmpeg::${lcomponent} PROPERTIES
VERSION ${FFmpeg_${lcomponent}_VERSION}
SOVERSION ${FFmpeg_${lcomponent}_VERSION}
)
if(APPLE OR UNIX) # Unix/Mac, Unix/Linux
set_target_properties(FFmpeg::${lcomponent} PROPERTIES
IMPORTED_LOCATION "${FFmpeg_${lcomponent}_LIBRARY}"
)
if(FFmpeg_${lcomponent}_BINARY)
set_target_properties(FFmpeg::${lcomponent} PROPERTIES
LIBRARY_OUTPUT_NAME "${FFmpeg_${lcomponent}_BINARY}"
)
endif()
else() # Windows
set_target_properties(FFmpeg::${lcomponent} PROPERTIES
IMPORTED_IMPLIB "${FFmpeg_${lcomponent}_LIBRARY}"
)
if(FFmpeg_${lcomponent}_BINARY)
set_target_properties(FFmpeg::${lcomponent} PROPERTIES
IMPORTED_LOCATION "${FFmpeg_${lcomponent}_BINARY}"
LIBRARY_OUTPUT_NAME "${FFmpeg_${lcomponent}_BINARY}"
)
endif()
endif()
set(FFmpeg_${lcomponent}_FOUND ON PARENT_SCOPE)
message(STATUS "${lcomponent}: Found v${FFmpeg_${lcomponent}_VERSION}")
endif()
endfunction()
set(FFMPEG_INCLUDE_DIRS)
set(FFMPEG_LIBRARIES)
# Components
# - avcodec: libavcodec, avcodec.h
# - avdevice: libavdevice, avdevice.h
# - avfilter: libavfilter, avfilter.h
# - avformat: libavformat, avformat.h
# - avutil: libavutil, avutil.h
# - swresample: libswresample, swresample.h
# - swscale: libswscale, swscale.h
set(_COMPONENTS
"avcodec"
"avformat"
"avfilter"
"swresample"
"swscale"
"avdevice"
"avutil"
)
set(FFmpeg_LIBRARIES)
set(FFmpeg_VERSION "0.0.0")
if(NOT FFmpeg_FIND_COMPONENTS)
message(FATAL_ERROR "No FFmpeg components requested")
message(WARNING "No specific component requested, defaulting to everything")
set(FFmpeg_FIND_COMPONENT ${_COMPONENTS})
endif()
list(GET FFmpeg_FIND_COMPONENTS 0 _first_comp)
string(TOUPPER "${_first_comp}" _first_comp)
set(FFmpeg_FOUND ON)
foreach(component IN LISTS FFmpeg_FIND_COMPONENTS)
string(TOUPPER "${component}" lcomponent)
string(TOLOWER "${component}" lcomponent)
foreach(component ${FFmpeg_FIND_COMPONENTS})
if(component STREQUAL "avcodec")
find_ffmpeg_library("${component}" "avcodec.h")
elseif(component STREQUAL "avdevice")
find_ffmpeg_library("${component}" "avdevice.h")
elseif(component STREQUAL "avfilter")
find_ffmpeg_library("${component}" "avfilter.h")
elseif(component STREQUAL "avformat")
find_ffmpeg_library("${component}" "avformat.h")
elseif(component STREQUAL "avresample")
find_ffmpeg_library("${component}" "avresample.h")
elseif(component STREQUAL "avutil")
find_ffmpeg_library("${component}" "avutil.h")
elseif(component STREQUAL "postproc")
find_ffmpeg_library("${component}" "postprocess.h")
elseif(component STREQUAL "swresample")
find_ffmpeg_library("${component}" "swresample.h")
elseif(component STREQUAL "swscale")
find_ffmpeg_library("${component}" "swscale.h")
else()
message(FATAL_ERROR "Unknown FFmpeg component requested: ${component}")
# Error out if an invalid component is requested.
if(NOT lcomponent IN_LIST _COMPONENTS)
message(FATAL_ERROR "Requested component '${lcomponent}' is unknown to us.")
endif()
find_ffmpeg_component(COMPONENT ${lcomponent})
if(NOT TARGET FFmpeg::${lcomponent})
if(FFmpeg_FIND_REQUIRED_${lcomponent})
set(FFmpeg_FOUND OFF)
endif()
endif()
endforeach()
# Set version based on priority list.
foreach(component IN LISTS _COMPONENTS)
string(TOUPPER "${lcomponent}" lcomponent)
string(TOLOWER "${lcomponent}" lcomponent)
if(TARGET FFmpeg::${lcomponent})
set(FFmpeg_VERSION ${FFmpeg_${lcomponent}_VERSION})
break()
endif()
endforeach()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(FFmpeg
FOUND_VAR FFMPEG_FOUND
REQUIRED_VARS FFMPEG_${_first_comp}_LIBRARIES FFMPEG_${_first_comp}_INCLUDE_DIRS
VERSION_VAR FFMPEG_${_first_comp}_VERSION_STRING
HANDLE_COMPONENTS)
FOUND_VAR FFmpeg_FOUND
HANDLE_COMPONENTS
NAME_MISMATCHED
)

View File

@ -0,0 +1,23 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
cmake_minimum_required(VERSION 3.26)
project("AutoFraming")
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")
streamfx_add_component("Auto-Framing"
RESOLVER streamfx_auto_framing_resolver
)
streamfx_add_component_dependency("NVIDIA" OPTIONAL)
# Resolver callback for the Auto-Framing component.
# Invoked by the StreamFX build system to enable optional, provider-specific
# code paths depending on which dependencies were resolved.
function(streamfx_auto_framing_resolver)
	# Providers
	#- NVIDIA
	streamfx_enabled_component("NVIDIA" T_CHECK)
	if(T_CHECK)
		# Define ENABLE_NVIDIA for this component's own sources only.
		# Fix: the PRIVATE scope keyword was previously duplicated inside the
		# argument list ("PRIVATE PRIVATE ENABLE_NVIDIA"); the redundant
		# keyword is removed here.
		target_compile_definitions(${COMPONENT_TARGET}
			PRIVATE ENABLE_NVIDIA
		)
	endif()
endfunction()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,174 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "obs/gs/gs-vertexbuffer.hpp"
#include "obs/obs-source-factory.hpp"
#include "plugin.hpp"
#include "util/util-threadpool.hpp"
#include "util/utility.hpp"
#include "warning-disable.hpp"
#include <atomic>
#include <list>
#include <memory>
#include <mutex>
#include "warning-enable.hpp"
#ifdef ENABLE_NVIDIA
#include "nvidia/ar/nvidia-ar-facedetection.hpp"
#endif
namespace streamfx::filter::autoframing {
	// How subjects are framed. Exact framing semantics live in the
	// implementation; names suggest single-subject vs. group framing.
	enum class tracking_mode : int64_t {
		SOLO  = 0,
		GROUP = 1,
	};

	// Which backend performs subject tracking. AUTOMATIC defers the choice
	// (see autoframing_factory::find_ideal_provider below); the NVIDIA
	// provider is backed by the AR face-detection feature (ENABLE_NVIDIA).
	enum class tracking_provider : int64_t {
		INVALID              = -1,
		AUTOMATIC            = 0,
		NVIDIA_FACEDETECTION = 1,
	};

	// Name of a tracking_provider as a C string / std::string.
	const char* cstring(tracking_provider provider);
	std::string string(tracking_provider provider);

	// Per-source instance of the Auto-Framing filter.
	class autoframing_instance : public obs::source_instance {
		// A tracked element: position, size and velocity, plus its age
		// (presumably in seconds since last detection — TODO confirm).
		struct track_el {
			float age;
			vec2  pos;
			vec2  size;
			vec2  vel;
		};

		// Per-element prediction/filtering state derived from a track_el.
		struct pred_el {
			// Motion-Predicted Position
			vec2 mp_pos;
			// Filtered Position
			streamfx::util::math::kalman1D<float> filter_pos_x;
			streamfx::util::math::kalman1D<float> filter_pos_y;
			// Offset Filtered Position
			vec2 offset_pos;
			// Padded Area
			vec2 pad_size;
			// Aspect-Ratio-Corrected Padded Area
			vec2 aspected_size;
		};

		// Render state.
		bool                                               _dirty;    // Input changed; re-capture/re-process needed.
		std::pair<uint32_t, uint32_t>                      _size;     // Input dimensions.
		std::pair<uint32_t, uint32_t>                      _out_size; // Output dimensions.
		std::shared_ptr<::streamfx::gfx::util>             _gfx_debug;
		std::shared_ptr<::streamfx::obs::gs::effect>       _standard_effect;
		std::shared_ptr<::streamfx::obs::gs::texrender>    _input;
		std::shared_ptr<::streamfx::obs::gs::vertexbuffer> _vb;

		// Provider state. _provider_ui is the user-selected value; _provider
		// the effectively active one. Switching happens asynchronously on the
		// thread pool (_provider_task), guarded by _provider_lock.
		tracking_provider                           _provider;
		tracking_provider                           _provider_ui;
		std::atomic<bool>                           _provider_ready;
		std::mutex                                  _provider_lock;
		std::shared_ptr<util::threadpool::task>     _provider_task;
#ifdef ENABLE_NVIDIA
		std::shared_ptr<::streamfx::nvidia::ar::facedetection> _nvidia_fx;
#endif

		// User-configured tracking/framing parameters (set via update()).
		tracking_mode _track_mode;
		float         _track_frequency; // Tracking rate; counted down by _track_frequency_counter.
		float         _motion_smoothing;
		float         _motion_smoothing_kalman_pnc; // Kalman process noise covariance — TODO confirm.
		float         _motion_smoothing_kalman_mnc; // Kalman measurement noise covariance — TODO confirm.
		float         _motion_prediction;
		float         _frame_stability;
		float         _frame_stability_kalman;
		bool          _frame_padding_prc[2]; // Per-axis: padding given as percentage instead of pixels — TODO confirm.
		vec2          _frame_padding;
		bool          _frame_offset_prc[2]; // Per-axis: offset given as percentage instead of pixels — TODO confirm.
		vec2          _frame_offset;
		float         _frame_aspect_ratio;

		// Runtime tracking data: current elements and their prediction state.
		float                                                         _track_frequency_counter;
		std::list<std::shared_ptr<track_el>>                          _tracked_elements;
		std::map<std::shared_ptr<track_el>, std::shared_ptr<pred_el>> _predicted_elements;

		// Smoothed final frame (position and size), one Kalman filter per axis.
		streamfx::util::math::kalman1D<float> _frame_pos_x;
		streamfx::util::math::kalman1D<float> _frame_pos_y;
		vec2                                  _frame_pos;
		streamfx::util::math::kalman1D<float> _frame_size_x;
		streamfx::util::math::kalman1D<float> _frame_size_y;
		vec2                                  _frame_size;

		bool _debug; // Enable debug visualization via _gfx_debug.

		public:
		~autoframing_instance();
		autoframing_instance(obs_data_t* settings, obs_source_t* self);

		// obs::source_instance overrides (settings lifecycle).
		void load(obs_data_t* data) override;
		void migrate(obs_data_t* data, uint64_t version) override;
		void update(obs_data_t* data) override;
		void properties(obs_properties_t* properties);

		uint32_t get_width() override;
		uint32_t get_height() override;

		// obs::source_instance overrides (per-frame work).
		virtual void video_tick(float seconds) override;
		virtual void video_render(gs_effect_t* effect) override;

		private:
		// Advance tracking state by 'seconds'; called from video_tick.
		void tracking_tick(float seconds);

		// Asynchronous provider switching (see _provider_task).
		void switch_provider(tracking_provider provider);
		void task_switch_provider(util::threadpool::task_data_t data);

#ifdef ENABLE_NVIDIA
		// NVIDIA AR face-detection provider hooks.
		void nvar_facedetection_load();
		void nvar_facedetection_unload();
		void nvar_facedetection_process();
		void nvar_facedetection_properties(obs_properties_t* props);
		void nvar_facedetection_update(obs_data_t* data);
#endif
	};

	// Factory/singleton that registers the filter with OBS and owns
	// provider-wide runtime handles.
	class autoframing_factory : public obs::source_factory<streamfx::filter::autoframing::autoframing_factory, streamfx::filter::autoframing::autoframing_instance> {
#ifdef ENABLE_NVIDIA
		bool                                             _nvidia_available;
		std::shared_ptr<::streamfx::nvidia::cuda::obs>   _nvcuda;
		std::shared_ptr<::streamfx::nvidia::cv::cv>      _nvcvi;
		std::shared_ptr<::streamfx::nvidia::ar::ar>      _nvar;
#endif

		public:
		autoframing_factory();
		~autoframing_factory() override;

		const char* get_name() override;

		void              get_defaults2(obs_data_t* data) override;
		obs_properties_t* get_properties2(autoframing_instance* data) override;

		// Button callback in the properties UI (opens something manually —
		// TODO confirm target).
		static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);

		// Provider availability queries used to resolve AUTOMATIC.
		bool              is_provider_available(tracking_provider);
		tracking_provider find_ideal_provider();

		public: // Singleton
		static void                                 initialize();
		static void                                 finalize();
		static std::shared_ptr<autoframing_factory> instance();
	};
} // namespace streamfx::filter::autoframing

View File

@ -0,0 +1,9 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
# Minimum CMake version needed to configure this component.
cmake_minimum_required(VERSION 3.26)
project("Blur")

# Prefix all CMake status messages from this component for readable logs.
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

# Register the "Blur" component with the StreamFX build system.
streamfx_add_component("Blur")

View File

@ -0,0 +1,879 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2019 Cat Stevens <catb0t@protonmail.ch>
// AUTOGENERATED COPYRIGHT HEADER END
#include "filter-blur.hpp"
#include "strings.hpp"
#include "gfx/blur/gfx-blur-box-linear.hpp"
#include "gfx/blur/gfx-blur-box.hpp"
#include "gfx/blur/gfx-blur-dual-filtering.hpp"
#include "gfx/blur/gfx-blur-gaussian-linear.hpp"
#include "gfx/blur/gfx-blur-gaussian.hpp"
#include "obs/gs/gs-helper.hpp"
#include "obs/obs-source-tracker.hpp"
#include "util/util-logging.hpp"
#include "warning-disable.hpp"
#include <cfloat>
#include <cinttypes>
#include <cmath>
#include <map>
#include <stdexcept>
#include "warning-enable.hpp"
// OBS
#include "warning-disable.hpp"
#include <callback/signal.h>
#include <graphics/graphics.h>
#include <graphics/matrix4.h>
#include <util/platform.h>
#include "warning-enable.hpp"
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
#define ST_PREFIX "<filter::blur> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
// Translation Strings
#define ST_I18N "Filter.Blur"
#define ST_I18N_TYPE "Filter.Blur.Type"
#define ST_KEY_TYPE "Filter.Blur.Type"
#define ST_I18N_SUBTYPE "Filter.Blur.SubType"
#define ST_KEY_SUBTYPE "Filter.Blur.SubType"
#define ST_I18N_SIZE "Filter.Blur.Size"
#define ST_KEY_SIZE "Filter.Blur.Size"
#define ST_I18N_ANGLE "Filter.Blur.Angle"
#define ST_KEY_ANGLE "Filter.Blur.Angle"
#define ST_CENTER "Filter.Blur.Center"
#define ST_I18N_CENTER_X "Filter.Blur.Center.X"
#define ST_KEY_CENTER_X "Filter.Blur.Center.X"
#define ST_I18N_CENTER_Y "Filter.Blur.Center.Y"
#define ST_KEY_CENTER_Y "Filter.Blur.Center.Y"
#define ST_I18N_STEPSCALE "Filter.Blur.StepScale"
#define ST_KEY_STEPSCALE "Filter.Blur.StepScale"
#define ST_I18N_STEPSCALE_X "Filter.Blur.StepScale.X"
#define ST_KEY_STEPSCALE_X "Filter.Blur.StepScale.X"
#define ST_I18N_STEPSCALE_Y "Filter.Blur.StepScale.Y"
#define ST_KEY_STEPSCALE_Y "Filter.Blur.StepScale.Y"
#define ST_I18N_MASK "Filter.Blur.Mask"
#define ST_KEY_MASK "Filter.Blur.Mask"
#define ST_I18N_MASK_TYPE "Filter.Blur.Mask.Type"
#define ST_KEY_MASK_TYPE "Filter.Blur.Mask.Type"
#define ST_I18N_MASK_TYPE_REGION "Filter.Blur.Mask.Type.Region"
#define ST_I18N_MASK_TYPE_IMAGE "Filter.Blur.Mask.Type.Image"
#define ST_I18N_MASK_TYPE_SOURCE "Filter.Blur.Mask.Type.Source"
#define ST_I18N_MASK_REGION_LEFT "Filter.Blur.Mask.Region.Left"
#define ST_KEY_MASK_REGION_LEFT "Filter.Blur.Mask.Region.Left"
#define ST_I18N_MASK_REGION_RIGHT "Filter.Blur.Mask.Region.Right"
#define ST_KEY_MASK_REGION_RIGHT "Filter.Blur.Mask.Region.Right"
#define ST_I18N_MASK_REGION_TOP "Filter.Blur.Mask.Region.Top"
#define ST_KEY_MASK_REGION_TOP "Filter.Blur.Mask.Region.Top"
#define ST_I18N_MASK_REGION_BOTTOM "Filter.Blur.Mask.Region.Bottom"
#define ST_KEY_MASK_REGION_BOTTOM "Filter.Blur.Mask.Region.Bottom"
#define ST_I18N_MASK_REGION_FEATHER "Filter.Blur.Mask.Region.Feather"
#define ST_KEY_MASK_REGION_FEATHER "Filter.Blur.Mask.Region.Feather"
#define ST_I18N_MASK_REGION_FEATHER_SHIFT "Filter.Blur.Mask.Region.Feather.Shift"
#define ST_KEY_MASK_REGION_FEATHER_SHIFT "Filter.Blur.Mask.Region.Feather.Shift"
#define ST_I18N_MASK_REGION_INVERT "Filter.Blur.Mask.Region.Invert"
#define ST_KEY_MASK_REGION_INVERT "Filter.Blur.Mask.Region.Invert"
#define ST_I18N_MASK_IMAGE "Filter.Blur.Mask.Image"
#define ST_KEY_MASK_IMAGE "Filter.Blur.Mask.Image"
#define ST_I18N_MASK_SOURCE "Filter.Blur.Mask.Source"
#define ST_KEY_MASK_SOURCE "Filter.Blur.Mask.Source"
#define ST_I18N_MASK_COLOR "Filter.Blur.Mask.Color"
#define ST_KEY_MASK_COLOR "Filter.Blur.Mask.Color"
#define ST_I18N_MASK_ALPHA "Filter.Blur.Mask.Alpha"
#define ST_KEY_MASK_ALPHA "Filter.Blur.Mask.Alpha"
#define ST_I18N_MASK_MULTIPLIER "Filter.Blur.Mask.Multiplier"
#define ST_KEY_MASK_MULTIPLIER "Filter.Blur.Mask.Multiplier"
using namespace streamfx::filter::blur;
static constexpr std::string_view HELP_URL = "https://github.com/Xaymar/obs-StreamFX/wiki/Filter-Blur";
// Associates a blur-type settings key with the accessor for the factory that
// implements it and the translation key used for its display name.
struct local_blur_type_t {
	std::function<::streamfx::gfx::blur::ifactory&()> fn;   // Returns the blur implementation factory.
	const char*                                       name; // Translation key for the UI label.
};
// Associates a blur sub-type settings key with its enum value and the
// translation key used for its display name.
struct local_blur_subtype_t {
	::streamfx::gfx::blur::type type; // Sub-type enum (Area, Directional, Rotational, Zoom).
	const char*                 name; // Translation key for the UI label.
};
static std::map<std::string, local_blur_type_t> list_of_types = {
{"box", {&::streamfx::gfx::blur::box_factory::get, S_BLUR_TYPE_BOX}}, {"box_linear", {&::streamfx::gfx::blur::box_linear_factory::get, S_BLUR_TYPE_BOX_LINEAR}}, {"gaussian", {&::streamfx::gfx::blur::gaussian_factory::get, S_BLUR_TYPE_GAUSSIAN}}, {"gaussian_linear", {&::streamfx::gfx::blur::gaussian_linear_factory::get, S_BLUR_TYPE_GAUSSIAN_LINEAR}}, {"dual_filtering", {&::streamfx::gfx::blur::dual_filtering_factory::get, S_BLUR_TYPE_DUALFILTERING}},
};
// Lookup table: blur sub-type settings key -> enum value + display name.
static std::map<std::string, local_blur_subtype_t> list_of_subtypes = {
	{"area", {::streamfx::gfx::blur::type::Area, S_BLUR_SUBTYPE_AREA}},
	{"directional", {::streamfx::gfx::blur::type::Directional, S_BLUR_SUBTYPE_DIRECTIONAL}},
	{"rotational", {::streamfx::gfx::blur::type::Rotational, S_BLUR_SUBTYPE_ROTATIONAL}},
	{"zoom", {::streamfx::gfx::blur::type::Zoom, S_BLUR_SUBTYPE_ZOOM}},
};
// Creates a blur filter instance: allocates the intermediate render targets,
// loads the mask effect, then applies the initial settings via update().
blur_instance::blur_instance(obs_data_t* settings, obs_source_t* self) : obs::source_instance(settings, self), _gfx_util(::streamfx::gfx::util::get()), _source_rendered(false), _output_rendered(false)
{
	{
		// Graphics resources below require an active graphics context.
		auto gctx = streamfx::obs::gs::context();

		// Create RenderTargets
		this->_source_rt = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
		this->_output_rt = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);

		// Load Effects
		{
			auto file = streamfx::data_file_path("effects/mask.effect");
			try {
				_effect_mask = streamfx::obs::gs::effect::create(file);
			} catch (std::exception& ex) {
				// NOTE(review): failure is only logged; masking presumably
				// stays inactive without the effect — confirm.
				DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
			}
		}
	}

	update(settings);
}

blur_instance::~blur_instance() {}
// Binds all mask-related shader parameters on the given effect. Parameters are
// only assigned when the effect actually declares them. Always returns true.
bool blur_instance::apply_mask_parameters(streamfx::obs::gs::effect effect, gs_texture_t* original_texture, gs_texture_t* blurred_texture)
{
	// Helpers: assign a parameter only if the effect declares it.
	auto assign_texture = [&effect](const char* name, auto&& texture) {
		if (effect.has_parameter(name)) {
			effect.get_parameter(name).set_texture(texture);
		}
	};
	auto assign_float = [&effect](const char* name, float value) {
		if (effect.has_parameter(name)) {
			effect.get_parameter(name).set_float(value);
		}
	};
	// Mask textures may be absent (e.g. failed image load); fall back to null.
	auto assign_mask_texture = [&effect](const auto& texture) {
		if (effect.has_parameter("mask_image")) {
			if (texture) {
				effect.get_parameter("mask_image").set_texture(texture);
			} else {
				effect.get_parameter("mask_image").set_texture(nullptr);
			}
		}
	};

	assign_texture("image_orig", original_texture);
	assign_texture("image_blur", blurred_texture);

	// Region mask: normalized rectangle edges plus feathering controls.
	if (_mask.type == mask_type::Region) {
		assign_float("mask_region_left", _mask.region.left);
		assign_float("mask_region_right", _mask.region.right);
		assign_float("mask_region_top", _mask.region.top);
		assign_float("mask_region_bottom", _mask.region.bottom);
		assign_float("mask_region_feather", _mask.region.feather);
		assign_float("mask_region_feather_shift", _mask.region.feather_shift);
	}

	// Image mask: texture loaded from a file.
	if (_mask.type == mask_type::Image) {
		assign_mask_texture(_mask.image.texture);
	}

	// Source mask: texture captured from another source.
	if (_mask.type == mask_type::Source) {
		assign_mask_texture(_mask.source.texture);
	}

	// Shared parameters for image/source masks.
	if (effect.has_parameter("mask_color")) {
		effect.get_parameter("mask_color").set_float4(_mask.color.r, _mask.color.g, _mask.color.b, _mask.color.a);
	}
	assign_float("mask_multiplier", _mask.multiplier);

	return true;
}
// Called by OBS when the source is loaded from saved data; there is no
// load-only state, so this simply forwards to update().
void blur_instance::load(obs_data_t* settings)
{
	update(settings);
}
// Upgrades settings saved by older plugin versions to the current format.
// 'version' is the version the settings were last saved with; the switch is
// intentionally fall-through so each known version change applies in sequence.
void blur_instance::migrate(obs_data_t* settings, uint64_t version)
{
	// Now we use a fall-through switch to gradually upgrade each known version change.
	switch (version) {
	case 0: {
		// Braces scope the locals to this case. Without them, adding any later
		// 'case' label would be a compile error (jump over initialization).

		/// Blur Type
		// v0 stored the blur type as an integer; map it onto the string keys.
		int64_t old_blur = obs_data_get_int(settings, "Filter.Blur.Type");
		if (old_blur == 0) { // Box
			obs_data_set_string(settings, ST_KEY_TYPE, "box");
		} else if (old_blur == 1) { // Gaussian
			obs_data_set_string(settings, ST_KEY_TYPE, "gaussian");
		} else if (old_blur == 2) { // Bilateral, no longer included.
			obs_data_set_string(settings, ST_KEY_TYPE, "box");
		} else if (old_blur == 3) { // Box Linear
			obs_data_set_string(settings, ST_KEY_TYPE, "box_linear");
		} else if (old_blur == 4) { // Gaussian Linear
			obs_data_set_string(settings, ST_KEY_TYPE, "gaussian_linear");
		} else {
			obs_data_set_string(settings, ST_KEY_TYPE, "box");
		}
		obs_data_unset_user_value(settings, "Filter.Blur.Type");

		/// Directional Blur
		// v0 used a boolean toggle instead of the sub-type list.
		bool directional = obs_data_get_bool(settings, "Filter.Blur.Directional");
		if (directional) {
			obs_data_set_string(settings, ST_KEY_SUBTYPE, "directional");
		} else {
			obs_data_set_string(settings, ST_KEY_SUBTYPE, "area");
		}
		obs_data_unset_user_value(settings, "Filter.Blur.Directional");

		/// Directional Blur Angle
		double_t angle = obs_data_get_double(settings, "Filter.Blur.Directional.Angle");
		obs_data_set_double(settings, ST_KEY_ANGLE, angle);
		obs_data_unset_user_value(settings, "Filter.Blur.Directional.Angle");
	}
	}
}
// Re-reads all user settings into instance state. Called on every settings
// change as well as from load() and the constructor.
void blur_instance::update(obs_data_t* settings)
{
	{ // Blur Type
		const char* blur_type      = obs_data_get_string(settings, ST_KEY_TYPE);
		const char* blur_subtype   = obs_data_get_string(settings, ST_KEY_SUBTYPE);
		const char* last_blur_type = obs_data_get_string(settings, ST_KEY_TYPE ".last");

		auto type_found = list_of_types.find(blur_type);
		if (type_found != list_of_types.end()) {
			auto subtype_found = list_of_subtypes.find(blur_subtype);
			if (subtype_found != list_of_subtypes.end()) {
				// Recreate the blur object when the type or sub-type changed,
				// or when no blur object exists yet. The explicit null check
				// guards the _blur->get_type() dereference: without it, a
				// fresh instance whose saved ".last" type already matches the
				// current type would dereference a null _blur.
				if (!_blur || (strcmp(last_blur_type, blur_type) != 0) || (_blur->get_type() != subtype_found->second.type)) {
					if (type_found->second.fn().is_type_supported(subtype_found->second.type)) {
						_blur = type_found->second.fn().create(subtype_found->second.type);
					}
				}
			}
		}
	}

	{ // Blur Parameters
		this->_blur_size  = obs_data_get_double(settings, ST_KEY_SIZE);
		this->_blur_angle = obs_data_get_double(settings, ST_KEY_ANGLE);
		// Center is stored as a percentage in the UI, used normalized here.
		this->_blur_center.first  = obs_data_get_double(settings, ST_KEY_CENTER_X) / 100.0;
		this->_blur_center.second = obs_data_get_double(settings, ST_KEY_CENTER_Y) / 100.0;

		// Scaling
		this->_blur_step_scaling      = obs_data_get_bool(settings, ST_KEY_STEPSCALE);
		this->_blur_step_scale.first  = obs_data_get_double(settings, ST_KEY_STEPSCALE_X) / 100.0;
		this->_blur_step_scale.second = obs_data_get_double(settings, ST_KEY_STEPSCALE_Y) / 100.0;
	}

	{ // Masking
		_mask.enabled = obs_data_get_bool(settings, ST_KEY_MASK);
		if (_mask.enabled) {
			_mask.type = static_cast<mask_type>(obs_data_get_int(settings, ST_KEY_MASK_TYPE));
			switch (_mask.type) {
			case mask_type::Region:
				// Edges are percentages; right/bottom are flipped so each edge
				// is measured from its own side inward.
				_mask.region.left          = float(obs_data_get_double(settings, ST_KEY_MASK_REGION_LEFT) / 100.0);
				_mask.region.top           = float(obs_data_get_double(settings, ST_KEY_MASK_REGION_TOP) / 100.0);
				_mask.region.right         = 1.0f - float(obs_data_get_double(settings, ST_KEY_MASK_REGION_RIGHT) / 100.0);
				_mask.region.bottom        = 1.0f - float(obs_data_get_double(settings, ST_KEY_MASK_REGION_BOTTOM) / 100.0);
				_mask.region.feather       = float(obs_data_get_double(settings, ST_KEY_MASK_REGION_FEATHER) / 100.0);
				_mask.region.feather_shift = float(obs_data_get_double(settings, ST_KEY_MASK_REGION_FEATHER_SHIFT) / 100.0);
				_mask.region.invert        = obs_data_get_bool(settings, ST_KEY_MASK_REGION_INVERT);
				break;
			case mask_type::Image:
				_mask.image.path = obs_data_get_string(settings, ST_KEY_MASK_IMAGE);
				break;
			case mask_type::Source:
				_mask.source.name = obs_data_get_string(settings, ST_KEY_MASK_SOURCE);
				break;
			}
			if ((_mask.type == mask_type::Image) || (_mask.type == mask_type::Source)) {
				// Color is packed with red in the low byte; alpha comes from
				// its own slider.
				// NOTE(review): the alpha slider range is 0-100 but the value
				// is stored unscaled — confirm the shader's expectation.
				uint32_t color   = static_cast<uint32_t>(obs_data_get_int(settings, ST_KEY_MASK_COLOR));
				_mask.color.r    = static_cast<float>((color >> 0) & 0xFF) / 255.0f;
				_mask.color.g    = static_cast<float>((color >> 8) & 0xFF) / 255.0f;
				_mask.color.b    = static_cast<float>((color >> 16) & 0xFF) / 255.0f;
				_mask.color.a    = static_cast<float>(obs_data_get_double(settings, ST_KEY_MASK_ALPHA));
				_mask.multiplier = float(obs_data_get_double(settings, ST_KEY_MASK_MULTIPLIER));
			}
		}
	}
}
// Per-frame update: pushes cached settings into the blur object and (re)loads
// mask resources when their path/source name changed, then invalidates the
// cached render results for the upcoming frame.
void blur_instance::video_tick(float)
{
	// Blur
	if (_blur) {
		_blur->set_size(_blur_size);
		if (_blur_step_scaling) {
			_blur->set_step_scale(_blur_step_scale.first, _blur_step_scale.second);
		} else {
			_blur->set_step_scale(1.0, 1.0);
		}
		// The casts below are assumed to succeed because the type check
		// precedes them — TODO confirm no blur type reports Directional/
		// Rotational/Zoom without implementing the matching base class.
		if ((_blur->get_type() == ::streamfx::gfx::blur::type::Directional) || (_blur->get_type() == ::streamfx::gfx::blur::type::Rotational)) {
			auto obj = std::dynamic_pointer_cast<::streamfx::gfx::blur::base_angle>(_blur);
			obj->set_angle(_blur_angle);
		}
		if ((_blur->get_type() == ::streamfx::gfx::blur::type::Zoom) || (_blur->get_type() == ::streamfx::gfx::blur::type::Rotational)) {
			auto obj = std::dynamic_pointer_cast<::streamfx::gfx::blur::base_center>(_blur);
			obj->set_center(_blur_center.first, _blur_center.second);
		}
	}

	// Load Mask
	if (_mask.type == mask_type::Image) {
		// Only reload the image when the path actually changed.
		if (_mask.image.path_old != _mask.image.path) {
			try {
				_mask.image.texture = std::make_shared<streamfx::obs::gs::texture>(_mask.image.path);
				_mask.image.path_old = _mask.image.path;
			} catch (...) {
				// path_old stays unchanged, so the load is retried next tick.
				DLOG_ERROR("<filter-blur> Instance '%s' failed to load image '%s'.", obs_source_get_name(_self), _mask.image.path.c_str());
			}
		}
	} else if (_mask.type == mask_type::Source) {
		// Only re-acquire the source when its name changed.
		if (_mask.source.name_old != _mask.source.name) {
			try {
				_mask.source.source_texture = std::make_shared<streamfx::gfx::source_texture>(::streamfx::obs::source{_mask.source.name}, ::streamfx::obs::source{_self, false});
				_mask.source.is_scene = (obs_scene_from_source(_mask.source.source_texture->get_object()) != nullptr);
				_mask.source.name_old = _mask.source.name;
			} catch (...) {
				// name_old stays unchanged, so the grab is retried next tick.
				DLOG_ERROR("<filter-blur> Instance '%s' failed to grab source '%s'.", obs_source_get_name(_self), _mask.source.name.c_str());
			}
		}
	}

	// Force a fresh capture/blur pass on the next video_render call.
	_source_rendered = false;
	_output_rendered = false;
}
// Renders the filter: captures the filter input into _source_rt, runs the blur,
// optionally composites it through the mask effect into _output_rt, and finally
// draws the result. Capture and blur are cached per frame via _source_rendered/
// _output_rendered, which video_tick resets.
void blur_instance::video_render(gs_effect_t* effect)
{
	obs_source_t* parent        = obs_filter_get_parent(this->_self);
	obs_source_t* target        = obs_filter_get_target(this->_self);
	gs_effect_t*  defaultEffect = obs_get_base_effect(obs_base_effect::OBS_EFFECT_DEFAULT);
	uint32_t      baseW         = obs_source_get_base_width(target);
	uint32_t      baseH         = obs_source_get_base_height(target);

	// Verify that we can actually run first.
	if (!target || !parent || !this->_self || !this->_blur || (baseW == 0) || (baseH == 0)) {
		obs_source_skip_video_filter(this->_self);
		return;
	}

#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	streamfx::obs::gs::debug_marker gdmp{streamfx::obs::gs::debug_color_source, "Blur '%s'", obs_source_get_name(_self)};
#endif

	if (!_source_rendered) {
		// Source To Texture
		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_cache, "Cache"};
#endif
			if (obs_source_process_filter_begin(this->_self, GS_RGBA, OBS_ALLOW_DIRECT_RENDERING)) {
				{
					auto op = this->_source_rt->render(baseW, baseH);

					// Opaque 1:1 copy: disable blending, depth and stencil.
					gs_blend_state_push();
					gs_reset_blend_state();
					gs_enable_blending(false);
					gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);

					gs_set_cull_mode(GS_NEITHER);
					gs_enable_color(true, true, true, true);

					gs_enable_depth_test(false);
					gs_depth_function(GS_ALWAYS);

					gs_enable_stencil_test(false);
					gs_enable_stencil_write(false);
					gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
					gs_stencil_op(GS_STENCIL_BOTH, GS_KEEP, GS_KEEP, GS_KEEP);

					// Orthographic Camera and clear RenderTarget.
					gs_ortho(0, static_cast<float>(baseW), 0, static_cast<float>(baseH), -1., 1.);
					//gs_clear(GS_CLEAR_COLOR | GS_CLEAR_DEPTH, &black, 0, 0);

					// Render
					obs_source_process_filter_end(this->_self, defaultEffect, baseW, baseH);

					gs_blend_state_pop();
				}

				_source_texture = this->_source_rt->get_texture();
				if (!_source_texture) {
					obs_source_skip_video_filter(this->_self);
					return;
				}
			} else {
				obs_source_skip_video_filter(this->_self);
				return;
			}
		}

		_source_rendered = true;
	}

	if (!_output_rendered) {
		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_convert, "Blur"};
#endif
			// Run the actual blur on the captured texture.
			_blur->set_input(_source_texture);
			_output_texture = _blur->render();
		}

		// Mask
		if (_mask.enabled) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_convert, "Mask"};
#endif
			gs_blend_state_push();
			gs_reset_blend_state();
			gs_enable_color(true, true, true, true);
			gs_enable_blending(false);
			gs_enable_depth_test(false);
			gs_enable_stencil_test(false);
			gs_enable_stencil_write(false);
			gs_set_cull_mode(GS_NEITHER);
			gs_depth_function(GS_ALWAYS);
			gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
			gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
			gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

			// Pick the mask effect technique for the configured mask mode.
			std::string technique = "";
			switch (this->_mask.type) {
			case mask_type::Region:
				if (this->_mask.region.feather > std::numeric_limits<float>::epsilon()) {
					if (this->_mask.region.invert) {
						technique = "RegionFeatherInverted";
					} else {
						technique = "RegionFeather";
					}
				} else {
					if (this->_mask.region.invert) {
						technique = "RegionInverted";
					} else {
						technique = "Region";
					}
				}
				break;
			case mask_type::Image:
			case mask_type::Source:
				technique = "Image";
				break;
			}

			// Capture the mask source into a texture, if one is configured.
			if (_mask.source.source_texture) {
				uint32_t source_width  = obs_source_get_width(this->_mask.source.source_texture->get_object());
				uint32_t source_height = obs_source_get_height(this->_mask.source.source_texture->get_object());

				if (source_width == 0) {
					source_width = baseW;
				}
				if (source_height == 0) {
					source_height = baseH;
				}
				// Scenes report their size via the base canvas resolution.
				if (this->_mask.source.is_scene) {
					obs_video_info ovi;
					if (obs_get_video_info(&ovi)) {
						source_width  = ovi.base_width;
						source_height = ovi.base_height;
					}
				}

#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
				streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_capture, "Capture '%s'", obs_source_get_name(_mask.source.source_texture->get_object())};
#endif

				this->_mask.source.texture = this->_mask.source.source_texture->render(source_width, source_height);
			}

			// Composite original + blurred through the mask effect.
			apply_mask_parameters(_effect_mask, _source_texture->get_object(), _output_texture->get_object());

			try {
				auto op = this->_output_rt->render(baseW, baseH);
				gs_ortho(0, 1, 0, 1, -1, 1);

				// Render
				while (gs_effect_loop(_effect_mask.get_object(), technique.c_str())) {
					_gfx_util->draw_fullscreen_triangle();
				}
			} catch (const std::exception&) {
				gs_blend_state_pop();
				obs_source_skip_video_filter(this->_self);
				return;
			}
			gs_blend_state_pop();

			if (!(_output_texture = this->_output_rt->get_texture())) {
				obs_source_skip_video_filter(this->_self);
				return;
			}
		}

		_output_rendered = true;
	}

	// Draw source
	{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_render, "Render"};
#endif

		// It is important that we do not modify the blend state here, as it is set correctly by OBS
		gs_set_cull_mode(GS_NEITHER);
		gs_enable_color(true, true, true, true);
		gs_enable_depth_test(false);
		gs_depth_function(GS_ALWAYS);
		gs_enable_stencil_test(false);
		gs_enable_stencil_write(false);
		gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
		gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

		gs_effect_t* finalEffect = effect ? effect : defaultEffect;
		const char*  technique   = "Draw";

		gs_eparam_t* param = gs_effect_get_param_by_name(finalEffect, "image");
		if (!param) {
			DLOG_ERROR("<filter-blur:%s> Failed to set image param.", obs_source_get_name(this->_self));
			obs_source_skip_video_filter(_self);
			return;
		} else {
			gs_effect_set_texture(param, *_output_texture);
		}

		while (gs_effect_loop(finalEffect, technique)) {
			gs_draw_sprite(*_output_texture, 0, baseW, baseH);
		}
	}
}
// Registers the blur filter source type with OBS.
blur_factory::blur_factory()
{
	_info.id           = S_PREFIX "filter-blur";
	_info.type         = OBS_SOURCE_TYPE_FILTER;
	_info.output_flags = OBS_SOURCE_VIDEO;

	support_size(false);
	finish_setup();

	// Old id kept so scene collections from the pre-rename plugin still load.
	register_proxy("obs-stream-effects-filter-blur");
}

blur_factory::~blur_factory() {}
// Localized display name of the filter, as shown in the OBS filter list.
const char* blur_factory::get_name()
{
	return D_TRANSLATE(ST_I18N);
}
// Populates the default value for every setting of the blur filter.
void blur_factory::get_defaults2(obs_data_t* settings)
{
	// Type, Subtype
	obs_data_set_default_string(settings, ST_KEY_TYPE, "box");
	obs_data_set_default_string(settings, ST_KEY_SUBTYPE, "area");

	// Parameters
	// Size uses a double default for consistency: the property is a float
	// slider and blur_instance::update reads it via obs_data_get_double.
	obs_data_set_default_double(settings, ST_KEY_SIZE, 5.);
	obs_data_set_default_double(settings, ST_KEY_ANGLE, 0.);
	obs_data_set_default_double(settings, ST_KEY_CENTER_X, 50.);
	obs_data_set_default_double(settings, ST_KEY_CENTER_Y, 50.);
	obs_data_set_default_bool(settings, ST_KEY_STEPSCALE, false);
	obs_data_set_default_double(settings, ST_KEY_STEPSCALE_X, 1.);
	obs_data_set_default_double(settings, ST_KEY_STEPSCALE_Y, 1.);

	// Masking
	obs_data_set_default_bool(settings, ST_KEY_MASK, false);
	obs_data_set_default_int(settings, ST_KEY_MASK_TYPE, static_cast<int64_t>(mask_type::Region));
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_LEFT, 0.0);
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_RIGHT, 0.0);
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_TOP, 0.0);
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_BOTTOM, 0.0);
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_FEATHER, 0.0);
	obs_data_set_default_double(settings, ST_KEY_MASK_REGION_FEATHER_SHIFT, 0.0);
	obs_data_set_default_bool(settings, ST_KEY_MASK_REGION_INVERT, false);
	// The u8string temporary lives until the end of this full expression, and
	// obs_data copies the string during the call, so the pointer is safe here.
	obs_data_set_default_string(settings, ST_KEY_MASK_IMAGE, reinterpret_cast<const char*>(streamfx::data_file_path("white.png").generic_u8string().c_str()));
	obs_data_set_default_string(settings, ST_KEY_MASK_SOURCE, "");
	obs_data_set_default_int(settings, ST_KEY_MASK_COLOR, 0xFFFFFFFFull);
	obs_data_set_default_double(settings, ST_KEY_MASK_MULTIPLIER, 1.0);
}
// Properties-modified callback: keeps dependent widgets (sub-type list,
// sliders, mask controls) consistent with the currently selected blur type
// and mask mode. Returns true so the UI refreshes; false on invalid state.
bool modified_properties(void*, obs_properties_t* props, obs_property* prop, obs_data_t* settings) noexcept
{
	try {
		obs_property_t* p;
		const char*     propname = obs_property_name(prop);
		const char*     vtype    = obs_data_get_string(settings, ST_KEY_TYPE);
		const char*     vsubtype = obs_data_get_string(settings, ST_KEY_SUBTYPE);

		// Find new Type
		auto type_found = list_of_types.find(vtype);
		if (type_found == list_of_types.end()) {
			return false;
		}

		// Find new Subtype
		auto subtype_found = list_of_subtypes.find(vsubtype);
		if (subtype_found == list_of_subtypes.end()) {
			return false;
		}

		// Blur Type
		if (strcmp(propname, ST_KEY_TYPE) == 0) {
			obs_property_t* prop_subtype = obs_properties_get(props, ST_KEY_SUBTYPE);

			/// Disable unsupported items.
			std::size_t subvalue_idx = 0;
			for (std::size_t idx = 0, edx = obs_property_list_item_count(prop_subtype); idx < edx; idx++) {
				const char* subtype  = obs_property_list_item_string(prop_subtype, idx);
				bool        disabled = false;

				auto subtype_found_idx = list_of_subtypes.find(subtype);
				if (subtype_found_idx != list_of_subtypes.end()) {
					disabled = !type_found->second.fn().is_type_supported(subtype_found_idx->second.type);
				} else {
					disabled = true;
				}

				obs_property_list_item_disable(prop_subtype, idx, disabled);
				if (strcmp(subtype, vsubtype) == 0) {
					subvalue_idx = idx;
				}
			}

			/// Ensure that there is a valid item selected.
			if (obs_property_list_item_disabled(prop_subtype, subvalue_idx)) {
				for (std::size_t idx = 0, edx = obs_property_list_item_count(prop_subtype); idx < edx; idx++) {
					if (!obs_property_list_item_disabled(prop_subtype, idx)) {
						const char* new_subtype = obs_property_list_item_string(prop_subtype, idx);
						obs_data_set_string(settings, ST_KEY_SUBTYPE, new_subtype);

						// Re-resolve the map entry for the value just selected.
						// (Previously this looked up the stale 'vsubtype'
						// pointer — which obs_data_set_string may invalidate —
						// and could leave 'subtype_found' at end(), which is
						// dereferenced below.)
						auto subtype_found2 = list_of_subtypes.find(new_subtype);
						if (subtype_found2 != list_of_subtypes.end()) {
							subtype_found = subtype_found2;
						}
						break;
					}
				}
			}
		}

		// Blur Sub-Type
		{
			bool has_angle_support     = (subtype_found->second.type == ::streamfx::gfx::blur::type::Directional) || (subtype_found->second.type == ::streamfx::gfx::blur::type::Rotational);
			bool has_center_support    = (subtype_found->second.type == ::streamfx::gfx::blur::type::Rotational) || (subtype_found->second.type == ::streamfx::gfx::blur::type::Zoom);
			bool has_stepscale_support = type_found->second.fn().is_step_scale_supported(subtype_found->second.type);
			bool show_scaling          = obs_data_get_bool(settings, ST_KEY_STEPSCALE) && has_stepscale_support;

			/// Size
			p = obs_properties_get(props, ST_KEY_SIZE);
			obs_property_float_set_limits(p, type_found->second.fn().get_min_size(subtype_found->second.type), type_found->second.fn().get_max_size(subtype_found->second.type), type_found->second.fn().get_step_size(subtype_found->second.type));

			/// Angle
			p = obs_properties_get(props, ST_KEY_ANGLE);
			obs_property_set_visible(p, has_angle_support);
			obs_property_float_set_limits(p, type_found->second.fn().get_min_angle(subtype_found->second.type), type_found->second.fn().get_max_angle(subtype_found->second.type), type_found->second.fn().get_step_angle(subtype_found->second.type));

			/// Center, Radius
			obs_property_set_visible(obs_properties_get(props, ST_KEY_CENTER_X), has_center_support);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_CENTER_Y), has_center_support);

			/// Step Scaling
			obs_property_set_visible(obs_properties_get(props, ST_KEY_STEPSCALE), has_stepscale_support);
			p = obs_properties_get(props, ST_KEY_STEPSCALE_X);
			obs_property_set_visible(p, show_scaling);
			obs_property_float_set_limits(p, type_found->second.fn().get_min_step_scale_x(subtype_found->second.type), type_found->second.fn().get_max_step_scale_x(subtype_found->second.type), type_found->second.fn().get_step_step_scale_x(subtype_found->second.type));
			p = obs_properties_get(props, ST_KEY_STEPSCALE_Y);
			obs_property_set_visible(p, show_scaling);
			obs_property_float_set_limits(p, type_found->second.fn().get_min_step_scale_x(subtype_found->second.type), type_found->second.fn().get_max_step_scale_x(subtype_found->second.type), type_found->second.fn().get_step_step_scale_x(subtype_found->second.type));
		}

		{ // Masking
			using namespace ::streamfx::gfx::blur;
			bool      show_mask   = obs_data_get_bool(settings, ST_KEY_MASK);
			mask_type mtype       = static_cast<mask_type>(obs_data_get_int(settings, ST_KEY_MASK_TYPE));
			bool      show_region = (mtype == mask_type::Region) && show_mask;
			bool      show_image  = (mtype == mask_type::Image) && show_mask;
			bool      show_source = (mtype == mask_type::Source) && show_mask;
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_TYPE), show_mask);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_LEFT), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_TOP), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_RIGHT), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_BOTTOM), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_FEATHER), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_FEATHER_SHIFT), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_REGION_INVERT), show_region);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_IMAGE), show_image);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_SOURCE), show_source);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_COLOR), show_image || show_source);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_ALPHA), show_image || show_source);
			obs_property_set_visible(obs_properties_get(props, ST_KEY_MASK_MULTIPLIER), show_image || show_source);
		}

		return true;
	} catch (...) {
		DLOG_ERROR("Unexpected exception in modified_properties callback.");
		return false;
	}
}
// Builds the complete property UI for the blur filter: manual button, blur
// type/sub-type lists, parameter sliders and the masking section. Visibility
// and limits of dependent widgets are maintained by modified_properties.
obs_properties_t* blur_factory::get_properties2(blur_instance* data)
{
	obs_properties_t* pr = obs_properties_create();
	obs_property_t*   p  = NULL;

	{
		obs_properties_add_button2(pr, S_MANUAL_OPEN, D_TRANSLATE(S_MANUAL_OPEN), streamfx::filter::blur::blur_factory::on_manual_open, nullptr);
	}

	// Blur Type and Sub-Type
	{
		p = obs_properties_add_list(pr, ST_KEY_TYPE, D_TRANSLATE(ST_I18N_TYPE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		obs_property_set_modified_callback2(p, modified_properties, this);
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_TYPE_BOX), "box");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_TYPE_BOX_LINEAR), "box_linear");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_TYPE_GAUSSIAN), "gaussian");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_TYPE_GAUSSIAN_LINEAR), "gaussian_linear");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_TYPE_DUALFILTERING), "dual_filtering");

		p = obs_properties_add_list(pr, ST_KEY_SUBTYPE, D_TRANSLATE(ST_I18N_SUBTYPE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		obs_property_set_modified_callback2(p, modified_properties, this);
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_SUBTYPE_AREA), "area");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_SUBTYPE_DIRECTIONAL), "directional");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_SUBTYPE_ROTATIONAL), "rotational");
		obs_property_list_add_string(p, D_TRANSLATE(S_BLUR_SUBTYPE_ZOOM), "zoom");
	}

	// Blur Parameters
	// Slider limits here are placeholders; modified_properties tightens them
	// to the selected implementation's reported ranges.
	{
		p = obs_properties_add_float_slider(pr, ST_KEY_SIZE, D_TRANSLATE(ST_I18N_SIZE), 1, 32767, 1);
		p = obs_properties_add_float_slider(pr, ST_KEY_ANGLE, D_TRANSLATE(ST_I18N_ANGLE), -180.0, 180.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_CENTER_X, D_TRANSLATE(ST_I18N_CENTER_X), 0.00, 100.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_CENTER_Y, D_TRANSLATE(ST_I18N_CENTER_Y), 0.00, 100.0, 0.01);
		p = obs_properties_add_bool(pr, ST_KEY_STEPSCALE, D_TRANSLATE(ST_I18N_STEPSCALE));
		obs_property_set_modified_callback2(p, modified_properties, this);
		p = obs_properties_add_float_slider(pr, ST_KEY_STEPSCALE_X, D_TRANSLATE(ST_I18N_STEPSCALE_X), 0.0, 1000.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_STEPSCALE_Y, D_TRANSLATE(ST_I18N_STEPSCALE_Y), 0.0, 1000.0, 0.01);
	}

	// Masking
	{
		p = obs_properties_add_bool(pr, ST_KEY_MASK, D_TRANSLATE(ST_I18N_MASK));
		obs_property_set_modified_callback2(p, modified_properties, this);
		p = obs_properties_add_list(pr, ST_KEY_MASK_TYPE, D_TRANSLATE(ST_I18N_MASK_TYPE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
		obs_property_set_modified_callback2(p, modified_properties, this);
		obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_MASK_TYPE_REGION), static_cast<int64_t>(mask_type::Region));
		obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_MASK_TYPE_IMAGE), static_cast<int64_t>(mask_type::Image));
		obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_MASK_TYPE_SOURCE), static_cast<int64_t>(mask_type::Source));

		/// Region
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_LEFT, D_TRANSLATE(ST_I18N_MASK_REGION_LEFT), 0.0, 100.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_TOP, D_TRANSLATE(ST_I18N_MASK_REGION_TOP), 0.0, 100.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_RIGHT, D_TRANSLATE(ST_I18N_MASK_REGION_RIGHT), 0.0, 100.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_BOTTOM, D_TRANSLATE(ST_I18N_MASK_REGION_BOTTOM), 0.0, 100.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_FEATHER, D_TRANSLATE(ST_I18N_MASK_REGION_FEATHER), 0.0, 50.0, 0.01);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_REGION_FEATHER_SHIFT, D_TRANSLATE(ST_I18N_MASK_REGION_FEATHER_SHIFT), -100.0, 100.0, 0.01);
		p = obs_properties_add_bool(pr, ST_KEY_MASK_REGION_INVERT, D_TRANSLATE(ST_I18N_MASK_REGION_INVERT));

		/// Image
		{
			// The filter string must outlive this call; cache it on the factory.
			std::string filter = translate_string("%s (%s);;* (*.*)", D_TRANSLATE(S_FILETYPE_IMAGES), S_FILEFILTERS_TEXTURE);
			_translation_cache.push_back(filter);
			p = obs_properties_add_path(pr, ST_KEY_MASK_IMAGE, D_TRANSLATE(ST_I18N_MASK_IMAGE), OBS_PATH_FILE, _translation_cache.back().c_str(), nullptr);
		}

		/// Source
		p = obs_properties_add_list(pr, ST_KEY_MASK_SOURCE, D_TRANSLATE(ST_I18N_MASK_SOURCE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		obs_property_list_add_string(p, "", "");
		// Populate the list with all video sources and all scenes.
		obs::source_tracker::instance()->enumerate(
			[&p](std::string name, ::streamfx::obs::source) {
				obs_property_list_add_string(p, std::string(name + " (Source)").c_str(), name.c_str());
				return false;
			},
			obs::source_tracker::filter_video_sources);
		obs::source_tracker::instance()->enumerate(
			[&p](std::string name, ::streamfx::obs::source) {
				obs_property_list_add_string(p, std::string(name + " (Scene)").c_str(), name.c_str());
				return false;
			},
			obs::source_tracker::filter_scenes);

		/// Shared
		p = obs_properties_add_color(pr, ST_KEY_MASK_COLOR, D_TRANSLATE(ST_I18N_MASK_COLOR));
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_ALPHA, D_TRANSLATE(ST_I18N_MASK_ALPHA), 0.0, 100.0, 0.1);
		p = obs_properties_add_float_slider(pr, ST_KEY_MASK_MULTIPLIER, D_TRANSLATE(ST_I18N_MASK_MULTIPLIER), 0.0, 10.0, 0.01);
	}

	return pr;
}
std::string blur_factory::translate_string(const char* format, ...)
{
	// Formats 'format' with printf-style varargs into a std::string.
	//
	// Fix: the previous implementation ignored vsnprintf's return value
	// semantics. vsnprintf returns a negative value on encoding errors, and
	// on truncation it returns the length the string *would* have had —
	// which can exceed the buffer, so constructing the string from
	// buffer.data() + len read out of bounds. We now measure first, then
	// format into an exactly-sized buffer.
	va_list vargs;
	va_start(vargs, format);
	va_list vargs_copy;
	va_copy(vargs_copy, vargs); // vsnprintf consumes the list, so keep a copy for the second pass.
	int length = vsnprintf(nullptr, 0, format, vargs);
	va_end(vargs);
	if (length < 0) { // Formatting failed entirely (encoding error).
		va_end(vargs_copy);
		return std::string();
	}
	std::vector<char> buffer(static_cast<std::size_t>(length) + 1);
	vsnprintf(buffer.data(), buffer.size(), format, vargs_copy);
	va_end(vargs_copy);
	return std::string(buffer.data(), static_cast<std::size_t>(length));
}
// Property-button callback that opens the online manual in the user's browser.
// Always returns false so OBS does not rebuild the property view.
bool blur_factory::on_manual_open(obs_properties_t* props, obs_property_t* property, void* data)
{
try {
streamfx::open_url(HELP_URL);
return false;
} catch (const std::exception& ex) {
D_LOG_ERROR("Failed to open manual due to error: %s", ex.what());
return false;
} catch (...) {
// The trailing "" satisfies the macro's variadic format arguments.
D_LOG_ERROR("Failed to open manual due to unknown error.", "");
return false;
}
}
std::shared_ptr<blur_factory> blur_factory::instance()
{
static std::weak_ptr<blur_factory> winst;
static std::mutex mtx;
std::unique_lock<decltype(mtx)> lock(mtx);
auto instance = winst.lock();
if (!instance) {
instance = std::shared_ptr<blur_factory>(new blur_factory());
winst = instance;
}
return instance;
}
// Strong reference that keeps the factory alive while the component is loaded.
static std::shared_ptr<blur_factory> loader_instance;
// Registers the "blur" component; initialization runs after the listed
// dependencies ("core::threadpool", "core::source_tracker") are available.
static auto loader = streamfx::component(
"blur",
[]() { // Initializer
loader_instance = blur_factory::instance();
},
[]() { // Finalizer
loader_instance.reset();
},
{"core::threadpool", "core::source_tracker"});

View File

@ -0,0 +1,123 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx/blur/gfx-blur-base.hpp"
#include "gfx/gfx-source-texture.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-helper.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "obs/obs-source-factory.hpp"
#include "warning-disable.hpp"
#include <chrono>
#include <functional>
#include <list>
#include <map>
#include "warning-enable.hpp"
namespace streamfx::filter::blur {
// Masking modes restricting where the blurred image is shown.
enum class mask_type : int64_t {
Region,
Image,
Source,
};
// Per-source instance of the blur filter: captures the source into a
// texture, applies the configured blur, then optionally masks the result.
class blur_instance : public obs::source_instance {
// Effects
streamfx::obs::gs::effect _effect_mask;
std::shared_ptr<streamfx::gfx::util> _gfx_util;
// Input
std::shared_ptr<streamfx::obs::gs::texrender> _source_rt;
std::shared_ptr<streamfx::obs::gs::texture> _source_texture;
bool _source_rendered; // true once the source was captured this frame
// Rendering
std::shared_ptr<streamfx::obs::gs::texture> _output_texture;
std::shared_ptr<streamfx::obs::gs::texrender> _output_rt;
bool _output_rendered; // true once the blur output was produced this frame
// Blur
std::shared_ptr<::streamfx::gfx::blur::base> _blur;
double_t _blur_size;
double_t _blur_angle;
std::pair<double_t, double_t> _blur_center;
bool _blur_step_scaling;
std::pair<double_t, double_t> _blur_step_scale;
// Masking
struct {
bool enabled;
mask_type type;
struct {
float left;
float top;
float right;
float bottom;
float feather;
float feather_shift;
bool invert;
} region;
struct {
std::string path;
std::string path_old; // presumably the previously loaded path, used to detect changes — TODO confirm
std::shared_ptr<streamfx::obs::gs::texture> texture;
} image;
struct {
std::string name_old; // presumably the previously resolved name, used to detect changes — TODO confirm
std::string name;
bool is_scene;
std::shared_ptr<streamfx::gfx::source_texture> source_texture;
std::shared_ptr<streamfx::obs::gs::texture> texture;
} source;
struct {
float r;
float g;
float b;
float a;
} color;
float multiplier;
} _mask;
public:
blur_instance(obs_data_t* settings, obs_source_t* self);
~blur_instance();
public:
virtual void load(obs_data_t* settings) override;
virtual void migrate(obs_data_t* settings, uint64_t version) override;
virtual void update(obs_data_t* settings) override;
virtual void video_tick(float time) override;
virtual void video_render(gs_effect_t* effect) override;
private:
// Uploads the mask parameters (region/image/source, color, multiplier)
// into the masking effect before the final composite pass.
bool apply_mask_parameters(streamfx::obs::gs::effect effect, gs_texture_t* original_texture, gs_texture_t* blurred_texture);
};
// OBS source factory for the blur filter; also owns translated strings
// whose lifetime must outlast the property objects referencing them.
class blur_factory : public obs::source_factory<filter::blur::blur_factory, filter::blur::blur_instance> {
std::vector<std::string> _translation_cache; // keeps formatted strings alive for libobs
public:
blur_factory();
virtual ~blur_factory();
virtual const char* get_name() override;
virtual void get_defaults2(obs_data_t* settings) override;
virtual obs_properties_t* get_properties2(filter::blur::blur_instance* data) override;
// printf-style formatting helper; result is owned by the caller.
std::string translate_string(const char* format, ...);
// Property button callback that opens the online manual.
static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);
public: // Singleton
static std::shared_ptr<blur_factory> instance();
};
} // namespace streamfx::filter::blur

View File

@ -0,0 +1,57 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-base.hpp"
#include "warning-disable.hpp"
#include <stdexcept>
#include "warning-enable.hpp"
void streamfx::gfx::blur::base::set_step_scale_x(double_t v)
{
this->set_step_scale(v, this->get_step_scale_y());
}
void streamfx::gfx::blur::base::set_step_scale_y(double_t v)
{
this->set_step_scale(this->get_step_scale_x(), v);
}
double_t streamfx::gfx::blur::base::get_step_scale_x()
{
double_t x, y;
this->get_step_scale(x, y);
return x;
}
double_t streamfx::gfx::blur::base::get_step_scale_y()
{
double_t x, y;
this->get_step_scale(x, y);
return y;
}
void streamfx::gfx::blur::base_center::set_center_x(double_t v)
{
this->set_center(v, this->get_center_y());
}
void streamfx::gfx::blur::base_center::set_center_y(double_t v)
{
this->set_center(this->get_center_x(), v);
}
double_t streamfx::gfx::blur::base_center::get_center_x()
{
double_t x, y;
this->get_center(x, y);
return x;
}
double_t streamfx::gfx::blur::base_center::get_center_y()
{
double_t x, y;
this->get_center(x, y);
return y;
}

View File

@ -0,0 +1,109 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "obs/gs/gs-texture.hpp"
namespace streamfx::gfx {
namespace blur {
// Blur kernel variants offered by the blur subsystem.
enum class type : int64_t {
Invalid = -1,
Area,
Directional,
Rotational,
Zoom,
};
// Abstract interface implemented by every blur algorithm.
class base {
public:
virtual ~base() {}
// Texture that the next render() call will blur.
virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) = 0;
virtual ::streamfx::gfx::blur::type get_type() = 0;
virtual double_t get_size() = 0;
virtual void set_size(double_t width) = 0;
// Per-axis multiplier applied to the sampling step.
virtual void set_step_scale(double_t x, double_t y) = 0;
virtual void set_step_scale_x(double_t v);
virtual void set_step_scale_y(double_t v);
virtual void get_step_scale(double_t& x, double_t& y) = 0;
virtual double_t get_step_scale_x();
virtual double_t get_step_scale_y();
// Performs the blur and returns the blurred texture.
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() = 0;
// Returns the last rendered result without re-rendering.
virtual std::shared_ptr<::streamfx::obs::gs::texture> get() = 0;
};
// Optional interface for blurs that have an angle (Directional, Rotational).
class base_angle {
public:
virtual ~base_angle() {}
virtual double_t get_angle() = 0;
virtual void set_angle(double_t angle) = 0;
};
// Optional interface for blurs that have a center point (Rotational, Zoom).
class base_center {
public:
virtual ~base_center() {}
virtual void set_center(double_t x, double_t y) = 0;
virtual void set_center_x(double_t v);
virtual void set_center_y(double_t v);
virtual void get_center(double_t& x, double_t& y) = 0;
virtual double_t get_center_x();
virtual double_t get_center_y();
};
// Factory interface: creates blur instances and reports which blur types
// are supported plus the valid parameter ranges for each.
class ifactory {
public:
virtual ~ifactory() {}
virtual bool is_type_supported(::streamfx::gfx::blur::type type) = 0;
virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_min_size(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_step_size(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_max_size(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_min_angle(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_step_angle(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_max_angle(::streamfx::gfx::blur::type type) = 0;
virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) = 0;
virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) = 0;
};
} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,366 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-box-linear.hpp"
#include "common.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <cmath>
#include <memory>
#include <stdexcept>
#include "warning-enable.hpp"
#define ST_MAX_BLUR_SIZE 128 // Also change this in box-linear.effect if modified.
// Shared GPU state (effect + helpers) for all linear box blur instances.
streamfx::gfx::blur::box_linear_data::box_linear_data() : _gfx_util(::streamfx::gfx::util::get())
{
// Effect creation requires an active graphics context.
auto gctx = streamfx::obs::gs::context();
{
auto file = streamfx::data_file_path("effects/blur/box-linear.effect");
try {
_effect = streamfx::obs::gs::effect::create(file);
} catch (const std::exception& ex) {
// Non-fatal: render() checks the effect handle before use.
DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
}
}
}
streamfx::gfx::blur::box_linear_data::~box_linear_data()
{
// Release the effect while a graphics context is held.
auto gctx = streamfx::obs::gs::context();
_effect.reset();
}
std::shared_ptr<streamfx::gfx::util> streamfx::gfx::blur::box_linear_data::get_gfx_util()
{
return _gfx_util;
}
streamfx::obs::gs::effect streamfx::gfx::blur::box_linear_data::get_effect()
{
return _effect;
}
streamfx::gfx::blur::box_linear_factory::box_linear_factory() {}
streamfx::gfx::blur::box_linear_factory::~box_linear_factory() {}
bool streamfx::gfx::blur::box_linear_factory::is_type_supported(::streamfx::gfx::blur::type type)
{
	// The linear box implementation only provides Area and Directional blurs.
	return (type == ::streamfx::gfx::blur::type::Area) || (type == ::streamfx::gfx::blur::type::Directional);
}
std::shared_ptr<::streamfx::gfx::blur::base> streamfx::gfx::blur::box_linear_factory::create(::streamfx::gfx::blur::type type)
{
	// Instantiate the implementation matching the requested blur variant.
	if (type == ::streamfx::gfx::blur::type::Area) {
		return std::make_shared<::streamfx::gfx::blur::box_linear>();
	} else if (type == ::streamfx::gfx::blur::type::Directional) {
		return std::make_shared<::streamfx::gfx::blur::box_linear_directional>();
	}
	throw std::runtime_error("Invalid type.");
}
double_t streamfx::gfx::blur::box_linear_factory::get_min_size(::streamfx::gfx::blur::type)
{
	// Smallest usable blur radius, in pixels.
	return 1.0;
}
double_t streamfx::gfx::blur::box_linear_factory::get_step_size(::streamfx::gfx::blur::type)
{
	// Size is adjusted in whole-pixel increments.
	return 1.0;
}
double_t streamfx::gfx::blur::box_linear_factory::get_max_size(::streamfx::gfx::blur::type)
{
	// Must stay in sync with the limit baked into box-linear.effect.
	return ST_MAX_BLUR_SIZE;
}
double_t streamfx::gfx::blur::box_linear_factory::get_min_angle(::streamfx::gfx::blur::type v)
{
	// Only angle-driven blur types expose a usable angle range.
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return -180.0;
	}
	return 0;
}
double_t streamfx::gfx::blur::box_linear_factory::get_step_angle(::streamfx::gfx::blur::type)
{
	// Angle is adjusted in hundredths of a degree.
	return 0.01;
}
double_t streamfx::gfx::blur::box_linear_factory::get_max_angle(::streamfx::gfx::blur::type v)
{
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return 180.0;
	}
	return 0;
}
bool streamfx::gfx::blur::box_linear_factory::is_step_scale_supported(::streamfx::gfx::blur::type v)
{
	// Step scaling applies to blurs that sample along a line.
	return (v == ::streamfx::gfx::blur::type::Area) || (v == ::streamfx::gfx::blur::type::Zoom) || (v == ::streamfx::gfx::blur::type::Directional);
}
double_t streamfx::gfx::blur::box_linear_factory::get_min_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_linear_factory::get_step_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_linear_factory::get_max_step_scale_x(::streamfx::gfx::blur::type)
{
	return 1000.0;
}
double_t streamfx::gfx::blur::box_linear_factory::get_min_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_linear_factory::get_step_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_linear_factory::get_max_step_scale_y(::streamfx::gfx::blur::type)
{
	return 1000.0;
}
std::shared_ptr<::streamfx::gfx::blur::box_linear_data> streamfx::gfx::blur::box_linear_factory::data()
{
	// Lazily (re-)create the shared effect data; the weak reference lets it
	// be released once no blur instance holds it anymore.
	std::unique_lock<std::mutex> lock(_data_lock);
	auto shared = _data.lock();
	if (!shared) {
		shared = std::make_shared<::streamfx::gfx::blur::box_linear_data>();
		_data  = shared;
	}
	return shared;
}
::streamfx::gfx::blur::box_linear_factory& streamfx::gfx::blur::box_linear_factory::get()
{
	// Meyers singleton.
	static ::streamfx::gfx::blur::box_linear_factory instance;
	return instance;
}
streamfx::gfx::blur::box_linear::box_linear() : _data(::streamfx::gfx::blur::box_linear_factory::get().data()), _size(1.), _step_scale({1., 1.})
{
	// Two targets are needed for the separable two-pass blur.
	_rendertarget  = std::make_shared<::streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
	_rendertarget2 = std::make_shared<::streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
}
streamfx::gfx::blur::box_linear::~box_linear() {}
void streamfx::gfx::blur::box_linear::set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture)
{
	// Take ownership of the caller's reference.
	_input_texture = std::move(texture);
}
::streamfx::gfx::blur::type streamfx::gfx::blur::box_linear::get_type()
{
	return ::streamfx::gfx::blur::type::Area;
}
double_t streamfx::gfx::blur::box_linear::get_size()
{
	return _size;
}
void streamfx::gfx::blur::box_linear::set_size(double_t width)
{
	// Clamp into the range supported by the effect file.
	if (width < 1.0) {
		width = 1.0;
	} else if (width > ST_MAX_BLUR_SIZE) {
		width = ST_MAX_BLUR_SIZE;
	}
	_size = width;
}
void streamfx::gfx::blur::box_linear::set_step_scale(double_t x, double_t y)
{
	_step_scale = std::make_pair(x, y);
}
void streamfx::gfx::blur::box_linear::get_step_scale(double_t& x, double_t& y)
{
	x = _step_scale.first;
	y = _step_scale.second;
}
double_t streamfx::gfx::blur::box_linear::get_step_scale_x()
{
	return _step_scale.first;
}
double_t streamfx::gfx::blur::box_linear::get_step_scale_y()
{
	return _step_scale.second;
}
// Renders the linear box blur as two separable passes: horizontal into
// _rendertarget2, then vertical into _rendertarget. Returns the result.
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_linear::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Linear Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Configure a plain overwrite pipeline: no culling, depth, blending or
// stencil, so the effect output replaces the target contents verbatim.
gs_set_cull_mode(GS_NEITHER);
gs_enable_color(true, true, true, true);
gs_enable_depth_test(false);
gs_depth_function(GS_ALWAYS);
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_blending(false);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// Two Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
// Pass 1
effect.get_parameter("pImage").set_texture(_input_texture);
// Texel step points along X only for the horizontal pass.
effect.get_parameter("pImageTexel").set_float2(float(1.f / width), 0.f);
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
// 1 / (2*size + 1): normalization factor of the box kernel.
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Horizontal");
#endif
auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
// Pass 2
// Feed the horizontal result back in; texel step now points along Y.
effect.get_parameter("pImage").set_texture(_rendertarget2->get_texture());
effect.get_parameter("pImageTexel").set_float2(0., float(1.f / height));
{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Vertical");
#endif
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_linear::get()
{
	// Most recently rendered result, without re-rendering.
	return _rendertarget->get_texture();
}

streamfx::gfx::blur::box_linear_directional::box_linear_directional() : _angle(0) {}

::streamfx::gfx::blur::type streamfx::gfx::blur::box_linear_directional::get_type()
{
	return ::streamfx::gfx::blur::type::Directional;
}
double_t streamfx::gfx::blur::box_linear_directional::get_angle()
{
	// Internally stored in radians, exposed in degrees.
	return D_RAD_TO_DEG(_angle);
}
void streamfx::gfx::blur::box_linear_directional::set_angle(double_t angle)
{
	_angle = D_DEG_TO_RAD(angle);
}
// Renders the directional variant as a single pass along the configured
// angle; the texel step vector encodes both direction and magnitude.
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_linear_directional::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Linear Directional Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Plain overwrite pipeline: no culling, depth, blending or stencil.
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_color(true, true, true, true);
gs_enable_blending(false);
gs_enable_depth_test(false);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_set_cull_mode(GS_NEITHER);
gs_depth_function(GS_ALWAYS);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// One Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
effect.get_parameter("pImage").set_texture(_input_texture);
// Step vector rotated by _angle (radians) in texel units.
effect.get_parameter("pImageTexel").set_float2(float(1. / width * cos(_angle)), float(1.f / height * sin(_angle)));
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
// 1 / (2*size + 1): normalization factor of the box kernel.
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
{
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}

View File

@ -0,0 +1,122 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx-blur-base.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <mutex>
#include "warning-enable.hpp"
namespace streamfx::gfx {
namespace blur {
// Shared, lazily-created GPU resources for all linear box blur instances.
class box_linear_data {
streamfx::obs::gs::effect _effect;
std::shared_ptr<streamfx::gfx::util> _gfx_util;
public:
box_linear_data();
virtual ~box_linear_data();
std::shared_ptr<streamfx::gfx::util> get_gfx_util();
streamfx::obs::gs::effect get_effect();
};
// Factory for linear box blurs; reports supported types/limits and hands
// out the shared data object.
class box_linear_factory : public ::streamfx::gfx::blur::ifactory {
std::mutex _data_lock;
std::weak_ptr<::streamfx::gfx::blur::box_linear_data> _data; // weak so data is freed when unused
public:
box_linear_factory();
virtual ~box_linear_factory() override;
virtual bool is_type_supported(::streamfx::gfx::blur::type type) override;
virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_angle(::streamfx::gfx::blur::type type) override;
virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) override;
// Returns (creating if necessary) the shared data object.
std::shared_ptr<::streamfx::gfx::blur::box_linear_data> data();
public: // Singleton
static ::streamfx::gfx::blur::box_linear_factory& get();
};
// Area blur via two separable box passes (horizontal, then vertical).
class box_linear : public ::streamfx::gfx::blur::base {
protected:
std::shared_ptr<::streamfx::gfx::blur::box_linear_data> _data;
double_t _size;
std::pair<double_t, double_t> _step_scale;
std::shared_ptr<::streamfx::obs::gs::texture> _input_texture;
std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget; // final output
private:
std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget2; // intermediate (first pass)
public:
box_linear();
virtual ~box_linear() override;
virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) override;
virtual ::streamfx::gfx::blur::type get_type() override;
virtual double_t get_size() override;
virtual void set_size(double_t width) override;
virtual void set_step_scale(double_t x, double_t y) override;
virtual void get_step_scale(double_t& x, double_t& y) override;
virtual double_t get_step_scale_x() override;
virtual double_t get_step_scale_y() override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> get() override;
};
// Directional variant: a single pass along a configurable angle.
class box_linear_directional : public ::streamfx::gfx::blur::box_linear, public ::streamfx::gfx::blur::base_angle {
double_t _angle; // stored in radians
public:
box_linear_directional();
virtual ::streamfx::gfx::blur::type get_type() override;
virtual double_t get_angle() override;
virtual void set_angle(double_t angle) override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
};
} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,517 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-box.hpp"
#include "common.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <cmath>
#include <memory>
#include <stdexcept>
#include "warning-enable.hpp"
#define ST_MAX_BLUR_SIZE 128 // Also change this in box.effect if modified.
// Shared GPU state (effect + helpers) for all box blur instances.
streamfx::gfx::blur::box_data::box_data() : _gfx_util(::streamfx::gfx::util::get())
{
// Effect creation requires an active graphics context.
auto gctx = streamfx::obs::gs::context();
{
auto file = streamfx::data_file_path("effects/blur/box.effect");
try {
_effect = streamfx::obs::gs::effect::create(file);
} catch (const std::exception& ex) {
// Non-fatal: render() checks the effect handle before use.
DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
}
}
}
streamfx::gfx::blur::box_data::~box_data()
{
// Release the effect while a graphics context is held.
auto gctx = streamfx::obs::gs::context();
_effect.reset();
}
std::shared_ptr<streamfx::gfx::util> streamfx::gfx::blur::box_data::get_gfx_util()
{
return _gfx_util;
}
streamfx::obs::gs::effect streamfx::gfx::blur::box_data::get_effect()
{
return _effect;
}
streamfx::gfx::blur::box_factory::box_factory() {}
streamfx::gfx::blur::box_factory::~box_factory() {}
bool streamfx::gfx::blur::box_factory::is_type_supported(::streamfx::gfx::blur::type type)
{
	// The box implementation covers every blur variant.
	switch (type) {
	case ::streamfx::gfx::blur::type::Area:
	case ::streamfx::gfx::blur::type::Directional:
	case ::streamfx::gfx::blur::type::Rotational:
	case ::streamfx::gfx::blur::type::Zoom:
		return true;
	default:
		return false;
	}
}
std::shared_ptr<::streamfx::gfx::blur::base> streamfx::gfx::blur::box_factory::create(::streamfx::gfx::blur::type type)
{
	// Instantiate the implementation matching the requested blur variant.
	// (The implicit derived-to-base conversion replaces the explicit
	// static_pointer_cast; the resulting pointer is identical.)
	if (type == ::streamfx::gfx::blur::type::Area) {
		return std::make_shared<::streamfx::gfx::blur::box>();
	} else if (type == ::streamfx::gfx::blur::type::Directional) {
		return std::make_shared<::streamfx::gfx::blur::box_directional>();
	} else if (type == ::streamfx::gfx::blur::type::Rotational) {
		return std::make_shared<::streamfx::gfx::blur::box_rotational>();
	} else if (type == ::streamfx::gfx::blur::type::Zoom) {
		return std::make_shared<::streamfx::gfx::blur::box_zoom>();
	}
	throw std::runtime_error("Invalid type.");
}
double_t streamfx::gfx::blur::box_factory::get_min_size(::streamfx::gfx::blur::type)
{
	// Smallest usable blur radius, in pixels.
	return 1.0;
}
double_t streamfx::gfx::blur::box_factory::get_step_size(::streamfx::gfx::blur::type)
{
	// Size is adjusted in whole-pixel increments.
	return 1.0;
}
double_t streamfx::gfx::blur::box_factory::get_max_size(::streamfx::gfx::blur::type)
{
	// Must stay in sync with the limit baked into box.effect.
	return ST_MAX_BLUR_SIZE;
}
double_t streamfx::gfx::blur::box_factory::get_min_angle(::streamfx::gfx::blur::type v)
{
	// Only angle-driven blur types expose a usable angle range.
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return -180.0;
	}
	return 0;
}
double_t streamfx::gfx::blur::box_factory::get_step_angle(::streamfx::gfx::blur::type)
{
	// Angle is adjusted in hundredths of a degree.
	return 0.01;
}
double_t streamfx::gfx::blur::box_factory::get_max_angle(::streamfx::gfx::blur::type v)
{
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return 180.0;
	}
	return 0;
}
bool streamfx::gfx::blur::box_factory::is_step_scale_supported(::streamfx::gfx::blur::type v)
{
	// Step scaling applies to blurs that sample along a line.
	return (v == ::streamfx::gfx::blur::type::Area) || (v == ::streamfx::gfx::blur::type::Zoom) || (v == ::streamfx::gfx::blur::type::Directional);
}
double_t streamfx::gfx::blur::box_factory::get_min_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_factory::get_step_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_factory::get_max_step_scale_x(::streamfx::gfx::blur::type)
{
	return 1000.0;
}
double_t streamfx::gfx::blur::box_factory::get_min_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_factory::get_step_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}
double_t streamfx::gfx::blur::box_factory::get_max_step_scale_y(::streamfx::gfx::blur::type)
{
	return 1000.0;
}
std::shared_ptr<::streamfx::gfx::blur::box_data> streamfx::gfx::blur::box_factory::data()
{
	// Lazily (re-)create the shared effect data; the weak reference lets it
	// be released once no blur instance holds it anymore.
	std::unique_lock<std::mutex> lock(_data_lock);
	auto shared = _data.lock();
	if (!shared) {
		shared = std::make_shared<::streamfx::gfx::blur::box_data>();
		_data  = shared;
	}
	return shared;
}
::streamfx::gfx::blur::box_factory& streamfx::gfx::blur::box_factory::get()
{
	// Meyers singleton.
	static ::streamfx::gfx::blur::box_factory instance;
	return instance;
}
streamfx::gfx::blur::box::box() : _data(::streamfx::gfx::blur::box_factory::get().data()), _size(1.), _step_scale({1., 1.})
{
	// Render-target creation requires an active graphics context. Two
	// targets are needed for the separable two-pass blur.
	auto gctx      = streamfx::obs::gs::context();
	_rendertarget  = std::make_shared<::streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
	_rendertarget2 = std::make_shared<::streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
}
streamfx::gfx::blur::box::~box() {}
void streamfx::gfx::blur::box::set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture)
{
	// Take ownership of the caller's reference.
	_input_texture = std::move(texture);
}
::streamfx::gfx::blur::type streamfx::gfx::blur::box::get_type()
{
	return ::streamfx::gfx::blur::type::Area;
}
double_t streamfx::gfx::blur::box::get_size()
{
	return _size;
}
void streamfx::gfx::blur::box::set_size(double_t width)
{
	// Clamp into the range supported by the effect file.
	if (width < 1.0) {
		width = 1.0;
	} else if (width > ST_MAX_BLUR_SIZE) {
		width = ST_MAX_BLUR_SIZE;
	}
	_size = width;
}
void streamfx::gfx::blur::box::set_step_scale(double_t x, double_t y)
{
	_step_scale = std::make_pair(x, y);
}
void streamfx::gfx::blur::box::get_step_scale(double_t& x, double_t& y)
{
	x = _step_scale.first;
	y = _step_scale.second;
}
double_t streamfx::gfx::blur::box::get_step_scale_x()
{
	return _step_scale.first;
}
double_t streamfx::gfx::blur::box::get_step_scale_y()
{
	return _step_scale.second;
}
// Renders the box blur as two separable passes: horizontal into
// _rendertarget2, then vertical into _rendertarget. Returns the result.
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Configure a plain overwrite pipeline: no culling, depth, blending or
// stencil, so the effect output replaces the target contents verbatim.
gs_set_cull_mode(GS_NEITHER);
gs_enable_color(true, true, true, true);
gs_enable_depth_test(false);
gs_depth_function(GS_ALWAYS);
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_blending(false);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// Two Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
// Pass 1
effect.get_parameter("pImage").set_texture(_input_texture);
// Texel step points along X only for the horizontal pass.
effect.get_parameter("pImageTexel").set_float2(float(1.f / width), 0.f);
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
// 1 / (2*size + 1): normalization factor of the box kernel.
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Horizontal");
#endif
auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
// Pass 2
// Feed the horizontal result back in; texel step now points along Y.
effect.get_parameter("pImage").set_texture(_rendertarget2->get_texture());
effect.get_parameter("pImageTexel").set_float2(0.f, float(1.f / height));
{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Vertical");
#endif
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box::get()
{
	// Most recently rendered result, without re-rendering.
	return _rendertarget->get_texture();
}

streamfx::gfx::blur::box_directional::box_directional() : _angle(0) {}

::streamfx::gfx::blur::type streamfx::gfx::blur::box_directional::get_type()
{
	return ::streamfx::gfx::blur::type::Directional;
}
double_t streamfx::gfx::blur::box_directional::get_angle()
{
	// Internally stored in radians, exposed in degrees.
	return D_RAD_TO_DEG(_angle);
}
void streamfx::gfx::blur::box_directional::set_angle(double_t angle)
{
	_angle = D_DEG_TO_RAD(angle);
}
// Renders a single-pass box blur of the input texture along the configured angle.
// Returns the render target's texture; if the effect failed to load, the target is
// returned unmodified (possibly stale from a previous render).
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_directional::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Directional Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Save current GPU state, then configure a plain opaque overwrite:
// no blending, depth, stencil or culling.
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_color(true, true, true, true);
gs_enable_blending(false);
gs_enable_depth_test(false);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_set_cull_mode(GS_NEITHER);
gs_depth_function(GS_ALWAYS);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// One Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
effect.get_parameter("pImage").set_texture(_input_texture);
// The per-sample texel step is rotated by _angle so the kernel samples along the blur direction.
effect.get_parameter("pImageTexel").set_float2(float(1. / width * cos(_angle)), float(1.f / height * sin(_angle)));
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
// Normalization factor for a box kernel of (2 * size + 1) taps.
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
{
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Draw")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}
// Rotational box blur accessors. The center is expressed in relative texture
// coordinates; the angle is stored in radians but exposed in degrees.
::streamfx::gfx::blur::type streamfx::gfx::blur::box_rotational::get_type()
{
return ::streamfx::gfx::blur::type::Rotational;
}
void streamfx::gfx::blur::box_rotational::set_center(double_t x, double_t y)
{
_center.first = x;
_center.second = y;
}
void streamfx::gfx::blur::box_rotational::get_center(double_t& x, double_t& y)
{
x = _center.first;
y = _center.second;
}
double_t streamfx::gfx::blur::box_rotational::get_angle()
{
return D_RAD_TO_DEG(_angle);
}
void streamfx::gfx::blur::box_rotational::set_angle(double_t angle)
{
_angle = D_DEG_TO_RAD(angle);
}
// Renders a single-pass rotational box blur around the configured center point.
// Returns the render target's texture; if the effect failed to load, the target
// is returned unmodified (possibly stale from a previous render).
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_rotational::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Rotational Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Save current GPU state, then configure a plain opaque overwrite:
// no blending, depth, stencil or culling.
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_color(true, true, true, true);
gs_enable_blending(false);
gs_enable_depth_test(false);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_set_cull_mode(GS_NEITHER);
gs_depth_function(GS_ALWAYS);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// One Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
effect.get_parameter("pImage").set_texture(_input_texture);
effect.get_parameter("pImageTexel").set_float2(float(1.f / width), float(1.f / height));
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
// Per-sample rotation step. NOTE(review): divides by _size — assumes the factory's
// minimum size (>= 1) is enforced before render; confirm callers cannot pass 0.
effect.get_parameter("pAngle").set_float(float(_angle / _size));
effect.get_parameter("pCenter").set_float2(float(_center.first), float(_center.second));
{
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Rotate")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}
// Zoom box blur accessors. The center is expressed in relative texture coordinates.
::streamfx::gfx::blur::type streamfx::gfx::blur::box_zoom::get_type()
{
return ::streamfx::gfx::blur::type::Zoom;
}
void streamfx::gfx::blur::box_zoom::set_center(double_t x, double_t y)
{
_center.first = x;
_center.second = y;
}
void streamfx::gfx::blur::box_zoom::get_center(double_t& x, double_t& y)
{
x = _center.first;
y = _center.second;
}
// Renders a single-pass zoom (radial) box blur toward the configured center point.
// Returns the render target's texture; if the effect failed to load, the target
// is returned unmodified (possibly stale from a previous render).
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::box_zoom::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Box Zoom Blur");
#endif
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Save current GPU state, then configure a plain opaque overwrite:
// no blending, depth, stencil or culling.
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_color(true, true, true, true);
gs_enable_blending(false);
gs_enable_depth_test(false);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_set_cull_mode(GS_NEITHER);
gs_depth_function(GS_ALWAYS);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
// One Pass Blur
streamfx::obs::gs::effect effect = _data->get_effect();
if (effect) {
effect.get_parameter("pImage").set_texture(_input_texture);
effect.get_parameter("pImageTexel").set_float2(float(1.f / width), float(1.f / height));
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
effect.get_parameter("pSizeInverseMul").set_float(float(1.0f / (float(_size) * 2.0f + 1.0f)));
effect.get_parameter("pCenter").set_float2(float(_center.first), float(_center.second));
{
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Zoom")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rendertarget->get_texture();
}

View File

@ -0,0 +1,150 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx-blur-base.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <mutex>
#include "warning-enable.hpp"
namespace streamfx::gfx {
namespace blur {
// Resources shared by all box blur instances: the loaded box blur effect and
// the fullscreen-triangle draw helper. Created lazily via box_factory::data().
class box_data {
streamfx::obs::gs::effect _effect;
std::shared_ptr<streamfx::gfx::util> _gfx_util;
public:
box_data();
virtual ~box_data();
// Returns the shared draw helper used to render fullscreen passes.
std::shared_ptr<streamfx::gfx::util> get_gfx_util();
// Returns the box blur effect; may be empty if loading failed.
streamfx::obs::gs::effect get_effect();
};
// Factory for box blur instances. Reports the supported blur types and their
// parameter limits, and owns the shared box_data via a weak reference so the
// data lives only while at least one blur instance exists.
class box_factory : public ::streamfx::gfx::blur::ifactory {
std::mutex _data_lock;
std::weak_ptr<::streamfx::gfx::blur::box_data> _data;
public:
box_factory();
virtual ~box_factory() override;
virtual bool is_type_supported(::streamfx::gfx::blur::type type) override;
virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_angle(::streamfx::gfx::blur::type type) override;
virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) override;
// Returns the shared box_data, creating it if no instance currently holds it.
std::shared_ptr<::streamfx::gfx::blur::box_data> data();
public: // Singleton
static ::streamfx::gfx::blur::box_factory& get();
};
// Area (two-pass separable) box blur. Also serves as the base for the
// directional, rotational and zoom variants below.
class box : public ::streamfx::gfx::blur::base {
protected:
std::shared_ptr<::streamfx::gfx::blur::box_data> _data;
double_t _size;
std::pair<double_t, double_t> _step_scale;
std::shared_ptr<::streamfx::obs::gs::texture> _input_texture;
std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget;
private:
// Intermediate target for the first pass of the two-pass blur.
std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget2;
public:
box();
virtual ~box() override;
virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) override;
virtual ::streamfx::gfx::blur::type get_type() override;
virtual double_t get_size() override;
virtual void set_size(double_t width) override;
virtual void set_step_scale(double_t x, double_t y) override;
virtual void get_step_scale(double_t& x, double_t& y) override;
virtual double_t get_step_scale_x() override;
virtual double_t get_step_scale_y() override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> get() override;
};
// Single-pass box blur along an arbitrary angle.
class box_directional : public ::streamfx::gfx::blur::box, public ::streamfx::gfx::blur::base_angle {
double_t _angle;
public:
box_directional();
virtual ::streamfx::gfx::blur::type get_type() override;
virtual double_t get_angle() override;
virtual void set_angle(double_t angle) override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
};
// Single-pass box blur rotating around a configurable center point.
class box_rotational : public ::streamfx::gfx::blur::box, public ::streamfx::gfx::blur::base_angle, public ::streamfx::gfx::blur::base_center {
std::pair<double_t, double_t> _center;
double_t _angle;
public:
virtual ::streamfx::gfx::blur::type get_type() override;
virtual void set_center(double_t x, double_t y) override;
virtual void get_center(double_t& x, double_t& y) override;
virtual double_t get_angle() override;
virtual void set_angle(double_t angle) override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
};
// Single-pass radial (zoom) box blur toward a configurable center point.
class box_zoom : public ::streamfx::gfx::blur::box, public ::streamfx::gfx::blur::base_center {
std::pair<double_t, double_t> _center;
public:
virtual ::streamfx::gfx::blur::type get_type() override;
virtual void set_center(double_t x, double_t y) override;
virtual void get_center(double_t& x, double_t& y) override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
};
} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,316 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-dual-filtering.hpp"
#include "common.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <algorithm>
#include <stdexcept>
#include "warning-enable.hpp"
// Dual Filtering Blur
//
// This type of Blur uses downsampling and upsampling and clever math. That makes it less
// controllable compared to other blur, but it can still be worked with. The distance for
// sampling texels has to be adjusted to match the correct value so that lower levels of
// blur than 2^n are possible.
//
// That means that for a blur size of:
// 0: No Iterations, straight copy.
// 1: 1 Iteration (2x), Arm Size 2, Offset Scale 1.0
// 2: 2 Iteration (4x), Arm Size 3, Offset Scale 0.5
// 3: 2 Iteration (4x), Arm Size 4, Offset Scale 1.0
// 4: 3 Iteration (8x), Arm Size 5, Offset Scale 0.25
// 5: 3 Iteration (8x), Arm Size 6, Offset Scale 0.5
// 6: 3 Iteration (8x), Arm Size 7, Offset Scale 0.75
// 7: 3 Iteration (8x), Arm Size 8, Offset Scale 1.0
// ...
#define ST_MAX_LEVELS 16
// Loads the dual-filtering effect and grabs the shared draw utility. The OBS
// graphics context is held for the duration of the effect load.
streamfx::gfx::blur::dual_filtering_data::dual_filtering_data() : _gfx_util(::streamfx::gfx::util::get())
{
auto gctx = streamfx::obs::gs::context();
{
auto file = streamfx::data_file_path("effects/blur/dual-filtering.effect");
try {
_effect = streamfx::obs::gs::effect::create(file);
} catch (const std::exception& ex) {
// A failed load leaves _effect empty; render() then falls back to a pass-through.
DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
}
}
}
streamfx::gfx::blur::dual_filtering_data::~dual_filtering_data()
{
// Hold the graphics context while the effect releases its GPU resources.
auto gctx = streamfx::obs::gs::context();
_effect.reset();
}
// Returns the shared draw helper used to render fullscreen passes.
std::shared_ptr<streamfx::gfx::util> streamfx::gfx::blur::dual_filtering_data::get_gfx_util()
{
return _gfx_util;
}
// Returns the dual-filtering effect; may be empty if loading failed.
streamfx::obs::gs::effect streamfx::gfx::blur::dual_filtering_data::get_effect()
{
return _effect;
}
// The factory itself holds no resources; shared data is created on demand in data().
streamfx::gfx::blur::dual_filtering_factory::dual_filtering_factory() {}
streamfx::gfx::blur::dual_filtering_factory::~dual_filtering_factory() {}
bool streamfx::gfx::blur::dual_filtering_factory::is_type_supported(::streamfx::gfx::blur::type type)
{
	// Dual-Filtering only implements the plain area blur; every other variant is unsupported.
	return type == ::streamfx::gfx::blur::type::Area;
}
std::shared_ptr<::streamfx::gfx::blur::base> streamfx::gfx::blur::dual_filtering_factory::create(::streamfx::gfx::blur::type type)
{
	// Only the area blur variant exists for this algorithm; reject anything else.
	if (type != ::streamfx::gfx::blur::type::Area) {
		throw std::runtime_error("Invalid type.");
	}
	return std::make_shared<::streamfx::gfx::blur::dual_filtering>();
}
// Size limits: blur size is measured in downsampling iterations, 1..ST_MAX_LEVELS.
double_t streamfx::gfx::blur::dual_filtering_factory::get_min_size(::streamfx::gfx::blur::type)
{
return double_t(1.);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_step_size(::streamfx::gfx::blur::type)
{
return double_t(1.);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_max_size(::streamfx::gfx::blur::type)
{
return double_t(ST_MAX_LEVELS);
}
// Angles do not apply to this blur; all angle limits are zero.
double_t streamfx::gfx::blur::dual_filtering_factory::get_min_angle(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_step_angle(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_max_angle(::streamfx::gfx::blur::type)
{
return double_t(0);
}
// Step scaling is not supported by dual filtering; all scale limits are zero.
bool streamfx::gfx::blur::dual_filtering_factory::is_step_scale_supported(::streamfx::gfx::blur::type)
{
return false;
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_min_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_step_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_max_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_min_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_step_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(0);
}
double_t streamfx::gfx::blur::dual_filtering_factory::get_max_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(0);
}
// Returns the shared data instance, creating it if no blur instance currently
// holds one. The weak_ptr + mutex pair guarantees a single live instance.
std::shared_ptr<::streamfx::gfx::blur::dual_filtering_data> streamfx::gfx::blur::dual_filtering_factory::data()
{
std::unique_lock<std::mutex> ulock(_data_lock);
std::shared_ptr<::streamfx::gfx::blur::dual_filtering_data> data = _data.lock();
if (!data) {
data = std::make_shared<::streamfx::gfx::blur::dual_filtering_data>();
_data = data;
}
return data;
}
// Meyers-style singleton accessor for the factory itself.
::streamfx::gfx::blur::dual_filtering_factory& streamfx::gfx::blur::dual_filtering_factory::get()
{
static ::streamfx::gfx::blur::dual_filtering_factory instance;
return instance;
}
// Pre-allocates one render target per downsampling level, plus level 0 which
// receives the final upsampled result.
streamfx::gfx::blur::dual_filtering::dual_filtering() : _data(::streamfx::gfx::blur::dual_filtering_factory::get().data()), _size(0), _iterations(0)
{
auto gctx = streamfx::obs::gs::context();
_rts.resize(ST_MAX_LEVELS + 1);
for (std::size_t n = 0; n <= ST_MAX_LEVELS; n++) {
// GS_RGBA by default; the disabled branches allow experimenting with
// higher-precision floating-point formats.
gs_color_format cf = GS_RGBA;
#if 0
cf = GS_RGBA16F;
#elif 0
cf = GS_RGBA32F;
#endif
_rts[n] = std::make_shared<streamfx::obs::gs::texrender>(cf, GS_ZS_NONE);
}
}
streamfx::gfx::blur::dual_filtering::~dual_filtering() {}
// Stores the texture to be blurred by the next render() call.
void streamfx::gfx::blur::dual_filtering::set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture)
{
_input_texture = std::move(texture);
}
::streamfx::gfx::blur::type streamfx::gfx::blur::dual_filtering::get_type()
{
return ::streamfx::gfx::blur::type::Area;
}
double_t streamfx::gfx::blur::dual_filtering::get_size()
{
return _size;
}
// Size maps to the number of down/upsampling iterations, clamped to the
// available render-target levels.
void streamfx::gfx::blur::dual_filtering::set_size(double_t width)
{
_size = width;
_iterations = std::clamp<size_t>(static_cast<size_t>(round(width)), 0, ST_MAX_LEVELS);
}
// Step scaling is not supported by this blur; the setter is a deliberate no-op.
void streamfx::gfx::blur::dual_filtering::set_step_scale(double_t, double_t) {}
// NOTE(review): the out-parameters are left unassigned here, so callers that do
// not pre-initialize them read indeterminate values — confirm callers guard on
// is_step_scale_supported() first, or consider writing 0 to both.
void streamfx::gfx::blur::dual_filtering::get_step_scale(double_t&, double_t&) {}
// Renders the dual-filtering blur: progressively downsamples the input through
// the "Down" technique, then upsamples back through "Up". The final result ends
// up in _rts[0]. Returns the input unchanged if the effect failed to load.
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filtering::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Dual-Filtering Blur");
#endif
auto effect = _data->get_effect();
if (!effect) {
return _input_texture;
}
// Save current GPU state, then configure a plain opaque overwrite:
// no blending, depth, stencil or culling.
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_color(true, true, true, true);
gs_enable_blending(false);
gs_enable_depth_test(false);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_set_cull_mode(GS_NEITHER);
gs_depth_function(GS_ALWAYS);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
uint32_t width = _input_texture->width();
uint32_t height = _input_texture->height();
// NOTE(review): if _iterations is 0 neither loop below runs and _rts[0] is
// returned without ever being rendered to — the documented "straight copy"
// case does not actually copy. Confirm upstream guarantees a size >= 1.
size_t iterations = _iterations;
// Downsample
for (std::size_t n = 1; n <= iterations; n++) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Down %" PRIuMAX, n);
#endif
// Select Texture
std::shared_ptr<streamfx::obs::gs::texture> tex;
if (n > 1) {
tex = _rts[n - 1]->get_texture();
} else { // Level 0 is treated as the original input and is never rendered to while downsampling.
tex = _input_texture;
}
// Reduce Size: each level halves the resolution. Stop early once a
// dimension reaches zero and remember how far we actually got.
uint32_t owidth = width >> n;
uint32_t oheight = height >> n;
if ((owidth == 0) || (oheight == 0)) {
iterations = n - 1;
break;
}
// Apply
effect.get_parameter("pImage").set_texture(tex);
effect.get_parameter("pImageSize").set_float2(static_cast<float>(owidth), static_cast<float>(oheight));
effect.get_parameter("pImageTexel").set_float2(0.5f / static_cast<float>(owidth), 0.5f / static_cast<float>(oheight));
{
auto op = _rts[n]->render(owidth, oheight);
gs_ortho(0., 1., 0., 1., 0., 1.);
while (gs_effect_loop(effect.get_object(), "Down")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
// Upsample back from the smallest level into _rts[0].
for (std::size_t n = iterations; n > 0; n--) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Up %" PRIuMAX, n);
#endif
// Select Texture
std::shared_ptr<streamfx::obs::gs::texture> tex = _rts[n]->get_texture();
// Get Size
uint32_t iwidth = tex->width();
uint32_t iheight = tex->height();
uint32_t owidth = width >> (n - 1);
uint32_t oheight = height >> (n - 1);
// Apply
effect.get_parameter("pImage").set_texture(tex);
effect.get_parameter("pImageSize").set_float2(static_cast<float>(iwidth), static_cast<float>(iheight));
effect.get_parameter("pImageTexel").set_float2(0.5f / static_cast<float>(iwidth), 0.5f / static_cast<float>(iheight));
{
auto op = _rts[n - 1]->render(owidth, oheight);
gs_ortho(0., 1., 0., 1., 0., 1.);
while (gs_effect_loop(effect.get_object(), "Up")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
}
gs_blend_state_pop();
return _rts[0]->get_texture();
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filtering::get()
{
	// Level 0 always holds the fully upsampled result of the last render() call.
	auto result = _rts.front()->get_texture();
	return result;
}

View File

@ -0,0 +1,108 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx-blur-base.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <mutex>
#include <vector>
#include "warning-enable.hpp"
namespace streamfx::gfx {
namespace blur {
// Resources shared by all dual-filtering blur instances: the loaded effect and
// the fullscreen-triangle draw helper. Created lazily via dual_filtering_factory::data().
class dual_filtering_data {
streamfx::obs::gs::effect _effect;
std::shared_ptr<streamfx::gfx::util> _gfx_util;
public:
dual_filtering_data();
virtual ~dual_filtering_data();
// Returns the shared draw helper used to render fullscreen passes.
std::shared_ptr<streamfx::gfx::util> get_gfx_util();
// Returns the dual-filtering effect; may be empty if loading failed.
streamfx::obs::gs::effect get_effect();
};
// Factory for dual-filtering blur instances. Only the area blur type is
// supported; angle and step-scale limits are reported as zero.
class dual_filtering_factory : public ::streamfx::gfx::blur::ifactory {
std::mutex _data_lock;
std::weak_ptr<::streamfx::gfx::blur::dual_filtering_data> _data;
public:
dual_filtering_factory();
virtual ~dual_filtering_factory() override;
virtual bool is_type_supported(::streamfx::gfx::blur::type type) override;
virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_size(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_angle(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_angle(::streamfx::gfx::blur::type type) override;
virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) override;
virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) override;
virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) override;
// Returns the shared dual_filtering_data, creating it if no instance holds it.
std::shared_ptr<::streamfx::gfx::blur::dual_filtering_data> data();
public: // Singleton
static ::streamfx::gfx::blur::dual_filtering_factory& get();
};
// Area blur implemented via iterative downsampling and upsampling
// ("dual filtering"). Blur size maps to the number of iterations.
class dual_filtering : public ::streamfx::gfx::blur::base {
std::shared_ptr<::streamfx::gfx::blur::dual_filtering_data> _data;
double_t _size;
std::size_t _iterations;
std::shared_ptr<streamfx::obs::gs::texture> _input_texture;
// One render target per mip level; index 0 receives the final result.
std::vector<std::shared_ptr<streamfx::obs::gs::texrender>> _rts;
public:
dual_filtering();
virtual ~dual_filtering() override;
virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) override;
virtual ::streamfx::gfx::blur::type get_type() override;
virtual double_t get_size() override;
virtual void set_size(double_t width) override;
virtual void set_step_scale(double_t x, double_t y) override;
virtual void get_step_scale(double_t& x, double_t& y) override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
virtual std::shared_ptr<::streamfx::obs::gs::texture> get() override;
};
} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,435 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-gaussian-linear.hpp"
#include "common.hpp"
#include "obs/gs/gs-helper.hpp"
#include "warning-disable.hpp"
#include <stdexcept>
#include "warning-enable.hpp"
// FIXME: This breaks when MAX_KERNEL_SIZE is changed, due to the way the Gaussian
// function first goes up at the point, and then once we pass the critical point
// will go down again and it is not handled well. This is a pretty basic
// approximation anyway at the moment.
#define ST_MAX_KERNEL_SIZE 128
#define ST_MAX_BLUR_SIZE (ST_MAX_KERNEL_SIZE - 1)
#define ST_SEARCH_DENSITY double_t(1. / 500.)
#define ST_SEARCH_THRESHOLD double_t(1. / (ST_MAX_KERNEL_SIZE * 5))
#define ST_SEARCH_EXTENSION 1
#define ST_SEARCH_RANGE ST_MAX_KERNEL_SIZE * 2
// Loads the linear-sampling Gaussian effect and precalculates one normalized
// half-kernel per supported blur size (1..ST_MAX_BLUR_SIZE).
streamfx::gfx::blur::gaussian_linear_data::gaussian_linear_data() : _gfx_util(::streamfx::gfx::util::get())
{
{
auto gctx = streamfx::obs::gs::context();
{
auto file = streamfx::data_file_path("effects/blur/gaussian-linear.effect");
try {
_effect = streamfx::obs::gs::effect::create(file);
} catch (const std::exception& ex) {
// A failed load leaves _effect empty; render() then falls back to a pass-through.
DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
}
}
}
// Precalculate Kernels
for (std::size_t kernel_size = 1; kernel_size <= ST_MAX_BLUR_SIZE; kernel_size++) {
std::vector<double_t> kernel_math(ST_MAX_KERNEL_SIZE);
std::vector<float> kernel_data(ST_MAX_KERNEL_SIZE);
double_t actual_width = 1.;
// Search for the smallest Gaussian width whose tail at (size + extension)
// still exceeds the significance threshold.
for (double_t h = ST_SEARCH_DENSITY; h < ST_SEARCH_RANGE; h += ST_SEARCH_DENSITY) {
if (streamfx::util::math::gaussian<double_t>(double_t(kernel_size + ST_SEARCH_EXTENSION), h) > ST_SEARCH_THRESHOLD) {
actual_width = h;
break;
}
}
// Calculate and normalize. Only the half-kernel [0..size] is stored; every
// tap except the center counts twice since the kernel is symmetric.
double_t sum = 0;
for (std::size_t p = 0; p <= kernel_size; p++) {
kernel_math[p] = streamfx::util::math::gaussian<double_t>(double_t(p), actual_width);
sum += kernel_math[p] * (p > 0 ? 2 : 1);
}
// Normalize to fill the entire 0..1 range over the width.
double_t inverse_sum = 1.0 / sum;
for (std::size_t p = 0; p <= kernel_size; p++) {
kernel_data.at(p) = float(kernel_math[p] * inverse_sum);
}
_kernels.push_back(std::move(kernel_data));
}
}
streamfx::gfx::blur::gaussian_linear_data::~gaussian_linear_data()
{
	// Hold the graphics context while the effect releases its GPU resources,
	// mirroring the constructor and the other blur data classes
	// (e.g. dual_filtering_data), which acquire it before touching the effect.
	auto gctx = streamfx::obs::gs::context();
	_effect.reset();
}
// Returns the Gaussian-linear effect; may be empty if loading failed.
streamfx::obs::gs::effect streamfx::gfx::blur::gaussian_linear_data::get_effect()
{
return _effect;
}
std::vector<float> const& streamfx::gfx::blur::gaussian_linear_data::get_kernel(std::size_t width)
{
	// Kernels are stored zero-based: slot 0 holds the kernel for blur size 1.
	// Clamp the requested size to the precalculated range before indexing.
	std::size_t idx = width;
	if (idx < 1) {
		idx = 1;
	} else if (idx > ST_MAX_BLUR_SIZE) {
		idx = ST_MAX_BLUR_SIZE;
	}
	return _kernels[idx - 1];
}
// Returns the shared draw helper used to render fullscreen passes.
std::shared_ptr<streamfx::gfx::util> streamfx::gfx::blur::gaussian_linear_data::get_gfx_util()
{
return _gfx_util;
}
// The factory itself holds no resources; shared data is created on demand in data().
streamfx::gfx::blur::gaussian_linear_factory::gaussian_linear_factory() {}
streamfx::gfx::blur::gaussian_linear_factory::~gaussian_linear_factory() {}
bool streamfx::gfx::blur::gaussian_linear_factory::is_type_supported(::streamfx::gfx::blur::type v)
{
	// The linear-sampling Gaussian implements the area and directional variants only.
	return (v == ::streamfx::gfx::blur::type::Area) || (v == ::streamfx::gfx::blur::type::Directional);
}
std::shared_ptr<::streamfx::gfx::blur::base> streamfx::gfx::blur::gaussian_linear_factory::create(::streamfx::gfx::blur::type v)
{
	// Instantiate the blur implementation matching the requested type.
	switch (v) {
	case ::streamfx::gfx::blur::type::Area:
		return std::make_shared<::streamfx::gfx::blur::gaussian_linear>();
	case ::streamfx::gfx::blur::type::Directional:
		// A shared_ptr to the derived class converts implicitly to the base
		// return type; the previous intermediate static_pointer_cast was redundant.
		return std::make_shared<::streamfx::gfx::blur::gaussian_linear_directional>();
	default:
		throw std::runtime_error("Invalid type.");
	}
}
// Size limits: blur size is measured in pixels, 1..ST_MAX_BLUR_SIZE.
double_t streamfx::gfx::blur::gaussian_linear_factory::get_min_size(::streamfx::gfx::blur::type)
{
return double_t(1.0);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_step_size(::streamfx::gfx::blur::type)
{
return double_t(1.0);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_max_size(::streamfx::gfx::blur::type)
{
return double_t(ST_MAX_BLUR_SIZE);
}
// Angle limits: only meaningful for the directional/rotational variants,
// which accept a full -180..+180 degree range.
double_t streamfx::gfx::blur::gaussian_linear_factory::get_min_angle(::streamfx::gfx::blur::type v)
{
switch (v) {
case ::streamfx::gfx::blur::type::Directional:
case ::streamfx::gfx::blur::type::Rotational:
return -180.0;
default:
return 0;
}
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_step_angle(::streamfx::gfx::blur::type)
{
return double_t(0.01);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_max_angle(::streamfx::gfx::blur::type v)
{
switch (v) {
case ::streamfx::gfx::blur::type::Directional:
case ::streamfx::gfx::blur::type::Rotational:
return 180.0;
default:
return 0;
}
}
// Step scaling is available for the area, zoom and directional variants.
bool streamfx::gfx::blur::gaussian_linear_factory::is_step_scale_supported(::streamfx::gfx::blur::type v)
{
switch (v) {
case ::streamfx::gfx::blur::type::Area:
case ::streamfx::gfx::blur::type::Zoom:
case ::streamfx::gfx::blur::type::Directional:
return true;
default:
return false;
}
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_min_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(0.01);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_step_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(0.01);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_max_step_scale_x(::streamfx::gfx::blur::type)
{
return double_t(1000.0);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_min_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(0.01);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_step_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(0.01);
}
double_t streamfx::gfx::blur::gaussian_linear_factory::get_max_step_scale_y(::streamfx::gfx::blur::type)
{
return double_t(1000.0);
}
// Returns the shared data instance, creating it if no blur instance currently
// holds one. The weak_ptr + mutex pair guarantees a single live instance.
std::shared_ptr<::streamfx::gfx::blur::gaussian_linear_data> streamfx::gfx::blur::gaussian_linear_factory::data()
{
std::unique_lock<std::mutex> ulock(_data_lock);
std::shared_ptr<::streamfx::gfx::blur::gaussian_linear_data> data = _data.lock();
if (!data) {
data = std::make_shared<::streamfx::gfx::blur::gaussian_linear_data>();
_data = data;
}
return data;
}
// Meyers-style singleton accessor for the factory itself.
::streamfx::gfx::blur::gaussian_linear_factory& streamfx::gfx::blur::gaussian_linear_factory::get()
{
static ::streamfx::gfx::blur::gaussian_linear_factory instance;
return instance;
}
// Allocates the two render targets used to ping-pong between the horizontal
// and vertical passes of the separable Gaussian blur.
streamfx::gfx::blur::gaussian_linear::gaussian_linear() : _data(::streamfx::gfx::blur::gaussian_linear_factory::get().data()), _size(1.), _step_scale({1., 1.})
{
auto gctx = streamfx::obs::gs::context();
_rendertarget = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
_rendertarget2 = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
}
streamfx::gfx::blur::gaussian_linear::~gaussian_linear() {}
// Stores the texture to be blurred by the next render() call.
void streamfx::gfx::blur::gaussian_linear::set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture)
{
_input_texture = std::move(texture);
}
::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian_linear::get_type()
{
return ::streamfx::gfx::blur::type::Area;
}
double_t streamfx::gfx::blur::gaussian_linear::get_size()
{
return _size;
}
// Clamps the requested blur size to the precalculated kernel range.
void streamfx::gfx::blur::gaussian_linear::set_size(double_t width)
{
if (width < 1.)
width = 1.;
if (width > ST_MAX_BLUR_SIZE)
width = ST_MAX_BLUR_SIZE;
_size = width;
}
// Per-axis multipliers applied to the sampling step in the shader.
void streamfx::gfx::blur::gaussian_linear::set_step_scale(double_t x, double_t y)
{
_step_scale.first = x;
_step_scale.second = y;
}
void streamfx::gfx::blur::gaussian_linear::get_step_scale(double_t& x, double_t& y)
{
x = _step_scale.first;
y = _step_scale.second;
}
double_t streamfx::gfx::blur::gaussian_linear::get_step_scale_x()
{
return _step_scale.first;
}
double_t streamfx::gfx::blur::gaussian_linear::get_step_scale_y()
{
return _step_scale.second;
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_linear::render()
{
	auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Linear Blur");
#endif

	streamfx::obs::gs::effect effect = _data->get_effect();
	if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
		// Nothing to do: the effect failed to load, or both axes are effectively disabled.
		return _input_texture;
	}

	// Fetch the kernel only after the early-out above, and bind it by const
	// reference: get_kernel() returns a cached vector, so copying it every
	// rendered frame is unnecessary work.
	auto const& kernel = _data->get_kernel(size_t(_size));

	float width  = float(_input_texture->width());
	float height = float(_input_texture->height());

	// Setup: fullscreen draw, no depth, stencil, or blending.
	gs_set_cull_mode(GS_NEITHER);
	gs_enable_color(true, true, true, true);
	gs_enable_depth_test(false);
	gs_depth_function(GS_ALWAYS);
	gs_blend_state_push();
	gs_reset_blend_state();
	gs_enable_blending(false);
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
	gs_enable_stencil_test(false);
	gs_enable_stencil_write(false);
	gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
	gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

	effect.get_parameter("pImage").set_texture(_input_texture);
	effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
	effect.get_parameter("pSize").set_float(float(_size));
	effect.get_parameter("pKernel").set_value(kernel.data(), ST_MAX_KERNEL_SIZE);

	// First Pass: horizontal blur into the secondary render target.
	if (_step_scale.first > std::numeric_limits<double_t>::epsilon()) {
		effect.get_parameter("pImageTexel").set_float2(float(1.f / width), 0.f);

		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Horizontal");
#endif
			auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
			gs_ortho(0, 1., 0, 1., 0, 1.);
			while (gs_effect_loop(effect.get_object(), "Draw")) {
				_data->get_gfx_util()->draw_fullscreen_triangle();
			}
		}

		// Ping-pong: result of this pass becomes the input of the next pass.
		std::swap(_rendertarget, _rendertarget2);
		effect.get_parameter("pImage").set_texture(_rendertarget->get_texture());
	}

	// Second Pass: vertical blur.
	if (_step_scale.second > std::numeric_limits<double_t>::epsilon()) {
		effect.get_parameter("pImageTexel").set_float2(0.f, float(1.f / height));

		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Vertical");
#endif
			auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
			gs_ortho(0, 1., 0, 1., 0, 1.);
			while (gs_effect_loop(effect.get_object(), "Draw")) {
				_data->get_gfx_util()->draw_fullscreen_triangle();
			}
		}

		std::swap(_rendertarget, _rendertarget2);
	}

	gs_blend_state_pop();
	return this->get();
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_linear::get()
{
	// Expose the texture produced by the most recent blur pass.
	auto result = _rendertarget->get_texture();
	return result;
}
streamfx::gfx::blur::gaussian_linear_directional::gaussian_linear_directional() : _angle(0.) {}

streamfx::gfx::blur::gaussian_linear_directional::~gaussian_linear_directional() {}

::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian_linear_directional::get_type()
{
	// This subclass always implements the directional blur variant.
	return ::streamfx::gfx::blur::type::Directional;
}

double_t streamfx::gfx::blur::gaussian_linear_directional::get_angle()
{
	// Stored internally in radians, reported to callers in degrees.
	double_t degrees = D_RAD_TO_DEG(_angle);
	return degrees;
}

void streamfx::gfx::blur::gaussian_linear_directional::set_angle(double_t angle)
{
	// Accepted in degrees, stored internally in radians.
	_angle = D_DEG_TO_RAD(angle);
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_linear_directional::render()
{
	auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Linear Directional Blur");
#endif

	streamfx::obs::gs::effect effect = _data->get_effect();
	if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
		// Nothing to do: the effect failed to load, or both axes are effectively disabled.
		return _input_texture;
	}

	// Fetch the kernel only after the early-out above, and bind it by const
	// reference: get_kernel() returns a cached vector; copying it every
	// rendered frame is unnecessary work.
	auto const& kernel = _data->get_kernel(size_t(_size));

	float width  = float(_input_texture->width());
	float height = float(_input_texture->height());

	// Setup: fullscreen draw, no depth, stencil, or blending.
	gs_set_cull_mode(GS_NEITHER);
	gs_enable_color(true, true, true, true);
	gs_enable_depth_test(false);
	gs_depth_function(GS_ALWAYS);
	gs_blend_state_push();
	gs_reset_blend_state();
	gs_enable_blending(false);
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
	gs_enable_stencil_test(false);
	gs_enable_stencil_write(false);
	gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
	gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

	effect.get_parameter("pImage").set_texture(_input_texture);
	// Texel step points along the blur direction (angle is kept in radians).
	effect.get_parameter("pImageTexel").set_float2(float(1.f / width * cos(_angle)), float(1.f / height * sin(_angle)));
	effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
	effect.get_parameter("pSize").set_float(float(_size));
	effect.get_parameter("pKernel").set_value(kernel.data(), ST_MAX_KERNEL_SIZE);

	// Single pass: directional blur along the configured angle.
	{
		auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
		gs_ortho(0, 1., 0, 1., 0, 1.);
		while (gs_effect_loop(effect.get_object(), "Draw")) {
			_data->get_gfx_util()->draw_fullscreen_triangle();
		}
	}

	gs_blend_state_pop();
	return this->get();
}

View File

@ -0,0 +1,133 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx-blur-base.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <mutex>
#include <vector>
#include "warning-enable.hpp"
namespace streamfx::gfx {
	namespace blur {
		// Shared, lazily-created state for all linear Gaussian blur instances:
		// the compiled effect, common drawing utilities, and pre-computed kernels.
		class gaussian_linear_data {
			streamfx::obs::gs::effect            _effect;
			std::shared_ptr<streamfx::gfx::util> _gfx_util;
			std::vector<std::vector<float>>      _kernels; // Pre-computed kernel weights, indexed by blur width.

			public:
			gaussian_linear_data();
			virtual ~gaussian_linear_data();

			// Shared drawing helpers (e.g. fullscreen triangle).
			std::shared_ptr<streamfx::gfx::util> get_gfx_util();

			// Effect used to render all linear Gaussian blur variants.
			streamfx::obs::gs::effect get_effect();

			// Pre-computed kernel for the given blur width.
			std::vector<float> const& get_kernel(std::size_t width);
		};

		// Factory that creates linear Gaussian blur instances and reports the
		// parameter limits of each supported blur variant.
		class gaussian_linear_factory : public ::streamfx::gfx::blur::ifactory {
			std::mutex                                                 _data_lock;
			std::weak_ptr<::streamfx::gfx::blur::gaussian_linear_data> _data;

			public:
			gaussian_linear_factory();
			virtual ~gaussian_linear_factory() override;

			virtual bool is_type_supported(::streamfx::gfx::blur::type type) override;

			virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) override;

			// Size limits (blur width in pixels).
			virtual double_t get_min_size(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_size(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_size(::streamfx::gfx::blur::type type) override;

			// Angle limits (degrees), for variants that have an angle.
			virtual double_t get_min_angle(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_angle(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_angle(::streamfx::gfx::blur::type type) override;

			// Step-scale limits (per-axis sampling scale).
			virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) override;

			virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) override;

			virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) override;

			// Shared data instance, created on first use and kept alive by the
			// blur instances that reference it.
			std::shared_ptr<::streamfx::gfx::blur::gaussian_linear_data> data();

			public: // Singleton
			static ::streamfx::gfx::blur::gaussian_linear_factory& get();
		};

		// Two-pass (separable) area blur using linearly sampled Gaussian weights;
		// also serves as the base class for the directional variant.
		class gaussian_linear : public ::streamfx::gfx::blur::base {
			protected:
			std::shared_ptr<::streamfx::gfx::blur::gaussian_linear_data> _data;
			double_t                                                     _size;       // Blur width in pixels.
			std::pair<double_t, double_t>                                _step_scale; // Per-axis sampling scale.
			std::shared_ptr<::streamfx::obs::gs::texture>                _input_texture;
			std::shared_ptr<::streamfx::obs::gs::texrender>              _rendertarget;

			private:
			std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget2; // Second target for ping-pong rendering.

			public:
			gaussian_linear();
			virtual ~gaussian_linear() override;

			virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) override;

			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual double_t get_size() override;

			virtual void set_size(double_t width) override;

			virtual void set_step_scale(double_t x, double_t y) override;

			virtual void get_step_scale(double_t& x, double_t& y) override;

			virtual double_t get_step_scale_x() override;

			virtual double_t get_step_scale_y() override;

			// Runs the blur and returns the result; returns the unmodified input
			// if rendering is not possible.
			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;

			// Result of the most recent render() call.
			virtual std::shared_ptr<::streamfx::obs::gs::texture> get() override;
		};

		// Directional (single-axis, angled) variant of the linear Gaussian blur.
		class gaussian_linear_directional : public ::streamfx::gfx::blur::gaussian_linear, public ::streamfx::gfx::blur::base_angle {
			double_t _angle; // Blur direction; stored in radians (converted in set_angle).

			public:
			gaussian_linear_directional();
			virtual ~gaussian_linear_directional() override;

			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual double_t get_angle() override;

			virtual void set_angle(double_t angle) override;

			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
		};
	} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,614 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-blur-gaussian.hpp"
#include "common.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <algorithm>
#include <stdexcept>
#include "warning-enable.hpp"
// TODO: It may be possible to optimize to run much faster: https://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/

// Number of kernel weights uploaded to the GPU.
#define ST_KERNEL_SIZE 128u
// Kernels are generated with this many samples per pixel of blur size.
#define ST_OVERSAMPLE_MULTIPLIER 2
// Largest supported blur size. Parenthesized so the macro expands safely
// inside larger expressions (e.g. `x / ST_MAX_BLUR_SIZE`).
#define ST_MAX_BLUR_SIZE (ST_KERNEL_SIZE / ST_OVERSAMPLE_MULTIPLIER)
// Loads the Gaussian blur effect and pre-computes the blur kernels for every
// supported blur size so that render() never has to generate weights.
streamfx::gfx::blur::gaussian_data::gaussian_data() : _gfx_util(::streamfx::gfx::util::get())
{
	using namespace streamfx::util;

	// Scratch buffers: weights are computed in double precision and then
	// converted to float for upload to the GPU.
	std::array<double, ST_KERNEL_SIZE> kernel_dbl;
	std::vector<float> kernel(ST_KERNEL_SIZE);

	{ // Load the effect file while holding a graphics context.
		auto gctx = streamfx::obs::gs::context();
		{
			auto file = streamfx::data_file_path("effects/blur/gaussian.effect");
			try {
				_effect = streamfx::obs::gs::effect::create(file);
			} catch (const std::exception& ex) {
				// Load failure is logged, not fatal; render() checks the effect before use.
				DLOG_ERROR("Error loading '%s': %s", file.generic_u8string().c_str(), ex.what());
			}
		}
	}

	//#define ST_USE_PASCAL_TRIANGLE
	// Pre-calculate Kernel Information for all Kernel sizes
	for (size_t size = 1; size <= ST_MAX_BLUR_SIZE; size++) {
#ifdef ST_USE_PASCAL_TRIANGLE
		// The Pascal Triangle can be used to generate Gaussian Kernels, which is
		// significantly faster than doing the same task with searching. It is also
		// much more accurate at the same time, so it is a 2-in-1 solution.

		// Generate the required row and sum.
		size_t offset = size;
		size_t row = size * 2;
		auto triangle = math::pascal_triangle<double>(row);
		double sum = pow(2, row);

		// Convert all integers to floats.
		double accum = 0.;
		for (size_t idx = offset; idx < std::min<size_t>(triangle.size(), ST_KERNEL_SIZE); idx++) {
			double v = static_cast<double>(triangle[idx]) / sum;
			kernel_dbl[idx - offset] = v;
			// Accumulator needed as we end up with float inaccuracies above a certain threshold.
			// Off-center weights count twice since the kernel is mirrored around its center.
			accum += v * (idx > offset ? 2 : 1);
		}

		// Rescale all values back into useful ranges.
		accum = 1. / accum;
		for (size_t idx = offset; idx < ST_KERNEL_SIZE; idx++) {
			kernel[idx - offset] = kernel_dbl[idx - offset] * accum;
		}
#else
		// Oversample the Gaussian curve so fractional blur sizes still have
		// enough distinct weights available.
		size_t oversample = size * ST_OVERSAMPLE_MULTIPLIER;

		// Generate initial weights and calculate a total from them.
		// Off-center weights count twice since the kernel is mirrored around index 0.
		double total = 0.;
		for (size_t idx = 0; (idx < oversample) && (idx < ST_KERNEL_SIZE); idx++) {
			kernel_dbl[idx] = math::gaussian<double>(static_cast<double>(idx), static_cast<double>(size));
			total += kernel_dbl[idx] * (idx > 0 ? 2 : 1);
		}

		// Scale the weights according to the total gathered, and convert to float.
		// This normalizes the kernel so the weights sum to 1 (no brightness change).
		for (size_t idx = 0; (idx < oversample) && (idx < ST_KERNEL_SIZE); idx++) {
			kernel_dbl[idx] /= total;
			kernel[idx] = static_cast<float>(kernel_dbl[idx]);
		}
#endif

		// Store Kernel
		_kernels.insert_or_assign(size, kernel);
	}
}
streamfx::gfx::blur::gaussian_data::~gaussian_data()
{
	// Release the GPU-side effect while a graphics context is held.
	auto gctx = streamfx::obs::gs::context();
	_effect.reset();
}

streamfx::obs::gs::effect streamfx::gfx::blur::gaussian_data::get_effect()
{
	return _effect;
}

std::shared_ptr<streamfx::gfx::util> streamfx::gfx::blur::gaussian_data::get_gfx_util()
{
	return _gfx_util;
}

std::vector<float> const& streamfx::gfx::blur::gaussian_data::get_kernel(std::size_t width)
{
	// Kernels exist only for sizes inside [1, ST_MAX_BLUR_SIZE]; clamp
	// out-of-range requests instead of throwing from .at().
	const std::size_t clamped = std::clamp<size_t>(width, 1, ST_MAX_BLUR_SIZE);
	return _kernels.at(clamped);
}
streamfx::gfx::blur::gaussian_factory::gaussian_factory() {}

streamfx::gfx::blur::gaussian_factory::~gaussian_factory() {}

bool streamfx::gfx::blur::gaussian_factory::is_type_supported(::streamfx::gfx::blur::type v)
{
	// Gaussian blur implements all four blur variants.
	switch (v) {
	case ::streamfx::gfx::blur::type::Area:
	case ::streamfx::gfx::blur::type::Directional:
	case ::streamfx::gfx::blur::type::Rotational:
	case ::streamfx::gfx::blur::type::Zoom:
		return true;
	default:
		return false;
	}
}
// Instantiates the blur implementation that matches the requested variant.
// Throws std::runtime_error for unsupported values.
std::shared_ptr<::streamfx::gfx::blur::base> streamfx::gfx::blur::gaussian_factory::create(::streamfx::gfx::blur::type v)
{
	switch (v) {
	case ::streamfx::gfx::blur::type::Area:
		return std::make_shared<::streamfx::gfx::blur::gaussian>();
	case ::streamfx::gfx::blur::type::Directional:
		// The intermediate static_pointer_cast the other branches lack was
		// redundant: gaussian_directional converts to the base pointer implicitly.
		return std::make_shared<::streamfx::gfx::blur::gaussian_directional>();
	case ::streamfx::gfx::blur::type::Rotational:
		return std::make_shared<::streamfx::gfx::blur::gaussian_rotational>();
	case ::streamfx::gfx::blur::type::Zoom:
		return std::make_shared<::streamfx::gfx::blur::gaussian_zoom>();
	default:
		throw std::runtime_error("Invalid type.");
	}
}
// Blur size limits (pixels); identical for all variants.
double_t streamfx::gfx::blur::gaussian_factory::get_min_size(::streamfx::gfx::blur::type)
{
	return 1.0;
}

double_t streamfx::gfx::blur::gaussian_factory::get_step_size(::streamfx::gfx::blur::type)
{
	return 1.0;
}

double_t streamfx::gfx::blur::gaussian_factory::get_max_size(::streamfx::gfx::blur::type)
{
	return double_t(ST_MAX_BLUR_SIZE);
}

// Angle limits (degrees); only directional and rotational blurs have a
// meaningful angle, all others report zero.
double_t streamfx::gfx::blur::gaussian_factory::get_min_angle(::streamfx::gfx::blur::type v)
{
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return -180.0;
	}
	return 0;
}

double_t streamfx::gfx::blur::gaussian_factory::get_step_angle(::streamfx::gfx::blur::type)
{
	return 0.01;
}

double_t streamfx::gfx::blur::gaussian_factory::get_max_angle(::streamfx::gfx::blur::type v)
{
	if ((v == ::streamfx::gfx::blur::type::Directional) || (v == ::streamfx::gfx::blur::type::Rotational)) {
		return 180.0;
	}
	return 0;
}

// Step scale is supported by every variant except the rotational blur.
bool streamfx::gfx::blur::gaussian_factory::is_step_scale_supported(::streamfx::gfx::blur::type v)
{
	return (v == ::streamfx::gfx::blur::type::Area) || (v == ::streamfx::gfx::blur::type::Zoom) || (v == ::streamfx::gfx::blur::type::Directional);
}

// Step-scale limits, identical for both axes.
double_t streamfx::gfx::blur::gaussian_factory::get_min_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}

double_t streamfx::gfx::blur::gaussian_factory::get_step_step_scale_x(::streamfx::gfx::blur::type)
{
	return 0.01;
}

double_t streamfx::gfx::blur::gaussian_factory::get_max_step_scale_x(::streamfx::gfx::blur::type)
{
	return 1000.0;
}

double_t streamfx::gfx::blur::gaussian_factory::get_min_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}

double_t streamfx::gfx::blur::gaussian_factory::get_step_step_scale_y(::streamfx::gfx::blur::type)
{
	return 0.01;
}

double_t streamfx::gfx::blur::gaussian_factory::get_max_step_scale_y(::streamfx::gfx::blur::type)
{
	return 1000.0;
}
std::shared_ptr<::streamfx::gfx::blur::gaussian_data> streamfx::gfx::blur::gaussian_factory::data()
{
	// Serialize access so only one shared data instance is ever created.
	std::unique_lock<std::mutex> ulock(_data_lock);
	auto instance = _data.lock();
	if (instance) {
		return instance;
	}
	// Expired (or never created): build a fresh instance and cache it weakly.
	instance = std::make_shared<::streamfx::gfx::blur::gaussian_data>();
	_data    = instance;
	return instance;
}
::streamfx::gfx::blur::gaussian_factory& streamfx::gfx::blur::gaussian_factory::get()
{
	// Meyers singleton: constructed on first use, thread-safe since C++11.
	static ::streamfx::gfx::blur::gaussian_factory singleton;
	return singleton;
}
// Acquires the shared blur data and allocates the two render targets needed
// for separable two-pass (ping-pong) rendering.
streamfx::gfx::blur::gaussian::gaussian() : _data(::streamfx::gfx::blur::gaussian_factory::get().data()), _size(1.), _step_scale({1., 1.})
{
	// Hold a graphics context while allocating GPU resources.
	auto gctx = streamfx::obs::gs::context();
	_rendertarget = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
	_rendertarget2 = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
}

streamfx::gfx::blur::gaussian::~gaussian() {}
void streamfx::gfx::blur::gaussian::set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture)
{
	// Take shared ownership of the texture to blur.
	_input_texture = std::move(texture);
}

::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian::get_type()
{
	// The base class implements the area blur variant.
	return ::streamfx::gfx::blur::type::Area;
}

double_t streamfx::gfx::blur::gaussian::get_size()
{
	return _size;
}

void streamfx::gfx::blur::gaussian::set_size(double_t width)
{
	// Clamp the requested blur width into the supported range [1, ST_MAX_BLUR_SIZE].
	double_t clamped = width;
	if (clamped > ST_MAX_BLUR_SIZE) {
		clamped = ST_MAX_BLUR_SIZE;
	}
	if (clamped < 1.) {
		clamped = 1.;
	}
	_size = clamped;
}

void streamfx::gfx::blur::gaussian::set_step_scale(double_t x, double_t y)
{
	// Step scale stretches the per-axis sampling distance.
	_step_scale = {x, y};
}

void streamfx::gfx::blur::gaussian::get_step_scale(double_t& x, double_t& y)
{
	y = _step_scale.second;
	x = _step_scale.first;
}

double_t streamfx::gfx::blur::gaussian::get_step_scale_x()
{
	// Horizontal component only.
	return _step_scale.first;
}

double_t streamfx::gfx::blur::gaussian::get_step_scale_y()
{
	// Vertical component only.
	return _step_scale.second;
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian::render()
{
	auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Blur");
#endif

	streamfx::obs::gs::effect effect = _data->get_effect();
	if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
		// Nothing to do: the effect failed to load, or both axes are effectively disabled.
		return _input_texture;
	}

	// Bind the kernel by const reference: get_kernel() returns a cached
	// vector, so copying it every rendered frame is unnecessary work.
	auto const& kernel = _data->get_kernel(size_t(_size));

	float width  = float(_input_texture->width());
	float height = float(_input_texture->height());

	// Setup: fullscreen draw, no depth, stencil, or blending.
	gs_set_cull_mode(GS_NEITHER);
	gs_enable_color(true, true, true, true);
	gs_enable_depth_test(false);
	gs_depth_function(GS_ALWAYS);
	gs_blend_state_push();
	gs_reset_blend_state();
	gs_enable_blending(false);
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
	gs_enable_stencil_test(false);
	gs_enable_stencil_write(false);
	gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
	gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

	effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
	// Kernels are generated oversampled, so the shader-side size scales by the same factor.
	effect.get_parameter("pSize").set_float(float(_size * ST_OVERSAMPLE_MULTIPLIER));
	effect.get_parameter("pKernel").set_value(kernel.data(), ST_KERNEL_SIZE);

	// First Pass: horizontal blur into the secondary render target.
	if (_step_scale.first > std::numeric_limits<double_t>::epsilon()) {
		effect.get_parameter("pImage").set_texture(_input_texture);
		effect.get_parameter("pImageTexel").set_float2(float(1.f / width), 0.f);

		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Horizontal");
#endif
			auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
			gs_ortho(0, 1., 0, 1., 0, 1.);
			while (gs_effect_loop(effect.get_object(), "Draw")) {
				_data->get_gfx_util()->draw_fullscreen_triangle();
			}
		}

		// Ping-pong: result of this pass becomes the input of the next pass.
		std::swap(_rendertarget, _rendertarget2);
	}

	// Second Pass: vertical blur.
	if (_step_scale.second > std::numeric_limits<double_t>::epsilon()) {
		effect.get_parameter("pImage").set_texture(_rendertarget->get_texture());
		effect.get_parameter("pImageTexel").set_float2(0.f, float(1.f / height));

		{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			auto gdm = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Vertical");
#endif
			auto op = _rendertarget2->render(uint32_t(width), uint32_t(height));
			gs_ortho(0, 1., 0, 1., 0, 1.);
			while (gs_effect_loop(effect.get_object(), "Draw")) {
				_data->get_gfx_util()->draw_fullscreen_triangle();
			}
		}

		std::swap(_rendertarget, _rendertarget2);
	}

	gs_blend_state_pop();
	return this->get();
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian::get()
{
	// Expose the texture produced by the most recent blur pass.
	auto result = _rendertarget->get_texture();
	return result;
}
streamfx::gfx::blur::gaussian_directional::gaussian_directional() : m_angle(0.) {}

streamfx::gfx::blur::gaussian_directional::~gaussian_directional() {}

::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian_directional::get_type()
{
	// This subclass always implements the directional blur variant.
	return ::streamfx::gfx::blur::type::Directional;
}

double_t streamfx::gfx::blur::gaussian_directional::get_angle()
{
	// Stored internally in radians, reported to callers in degrees.
	double_t degrees = D_RAD_TO_DEG(m_angle);
	return degrees;
}

void streamfx::gfx::blur::gaussian_directional::set_angle(double_t angle)
{
	// Accepted in degrees, stored internally in radians.
	m_angle = D_DEG_TO_RAD(angle);
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_directional::render()
{
	auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Directional Blur");
#endif

	streamfx::obs::gs::effect effect = _data->get_effect();
	if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
		// Nothing to do: the effect failed to load, or both axes are effectively disabled.
		return _input_texture;
	}

	// Bind the kernel by const reference: get_kernel() returns a cached
	// vector, so copying it every rendered frame is unnecessary work.
	auto const& kernel = _data->get_kernel(size_t(_size));

	float width  = float(_input_texture->width());
	float height = float(_input_texture->height());

	// Setup: fullscreen draw, no depth, stencil, or blending.
	gs_set_cull_mode(GS_NEITHER);
	gs_enable_color(true, true, true, true);
	gs_enable_depth_test(false);
	gs_depth_function(GS_ALWAYS);
	gs_blend_state_push();
	gs_reset_blend_state();
	gs_enable_blending(false);
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
	gs_enable_stencil_test(false);
	gs_enable_stencil_write(false);
	gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
	gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

	effect.get_parameter("pImage").set_texture(_input_texture);
	// Texel step points along the blur direction (angle is kept in radians).
	effect.get_parameter("pImageTexel").set_float2(float(1.f / width * cos(m_angle)), float(1.f / height * sin(m_angle)));
	effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
	// Kernels are generated oversampled, so the shader-side size scales by the same factor.
	effect.get_parameter("pSize").set_float(float(_size * ST_OVERSAMPLE_MULTIPLIER));
	effect.get_parameter("pKernel").set_value(kernel.data(), ST_KERNEL_SIZE);

	// Single pass: directional blur along the configured angle.
	{
		auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
		gs_ortho(0, 1., 0, 1., 0, 1.);
		while (gs_effect_loop(effect.get_object(), "Draw")) {
			_data->get_gfx_util()->draw_fullscreen_triangle();
		}
	}

	gs_blend_state_pop();
	return this->get();
}
::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian_rotational::get_type()
{
	// This subclass always implements the rotational blur variant.
	return ::streamfx::gfx::blur::type::Rotational;
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_rotational::render()
{
	auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Rotational Blur");
#endif

	streamfx::obs::gs::effect effect = _data->get_effect();
	if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
		// Nothing to do: the effect failed to load, or both axes are effectively disabled.
		return _input_texture;
	}

	// Bind the kernel by const reference: get_kernel() returns a cached
	// vector, so copying it every rendered frame is unnecessary work.
	auto const& kernel = _data->get_kernel(size_t(_size));

	float width  = float(_input_texture->width());
	float height = float(_input_texture->height());

	// Setup: fullscreen draw, no depth, stencil, or blending.
	gs_set_cull_mode(GS_NEITHER);
	gs_enable_color(true, true, true, true);
	gs_enable_depth_test(false);
	gs_depth_function(GS_ALWAYS);
	gs_blend_state_push();
	gs_reset_blend_state();
	gs_enable_blending(false);
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
	gs_enable_stencil_test(false);
	gs_enable_stencil_write(false);
	gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
	gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

	effect.get_parameter("pImage").set_texture(_input_texture);
	effect.get_parameter("pImageTexel").set_float2(float(1.f / width), float(1.f / height));
	effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
	// Kernels are generated oversampled, so the shader-side size scales by the same factor.
	effect.get_parameter("pSize").set_float(float(_size * ST_OVERSAMPLE_MULTIPLIER));
	// Angle is distributed across the blur samples.
	effect.get_parameter("pAngle").set_float(float(m_angle / _size));
	effect.get_parameter("pCenter").set_float2(float(m_center.first), float(m_center.second));
	effect.get_parameter("pKernel").set_value(kernel.data(), ST_KERNEL_SIZE);

	// Single pass: rotate-and-accumulate around the configured center.
	{
		auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
		gs_ortho(0, 1., 0, 1., 0, 1.);
		while (gs_effect_loop(effect.get_object(), "Rotate")) {
			_data->get_gfx_util()->draw_fullscreen_triangle();
		}
	}

	gs_blend_state_pop();
	return this->get();
}
void streamfx::gfx::blur::gaussian_rotational::set_center(double_t x, double_t y)
{
	// Center of rotation, in normalized texture coordinates per the caller.
	m_center = {x, y};
}

void streamfx::gfx::blur::gaussian_rotational::get_center(double_t& x, double_t& y)
{
	y = m_center.second;
	x = m_center.first;
}

double_t streamfx::gfx::blur::gaussian_rotational::get_angle()
{
	// Stored internally in radians, reported to callers in degrees.
	double_t degrees = double_t(D_RAD_TO_DEG(m_angle));
	return degrees;
}

void streamfx::gfx::blur::gaussian_rotational::set_angle(double_t angle)
{
	// Accepted in degrees, stored internally in radians.
	m_angle = D_DEG_TO_RAD(angle);
}
::streamfx::gfx::blur::type streamfx::gfx::blur::gaussian_zoom::get_type()
{
	// This subclass always implements the zoom blur variant.
	return ::streamfx::gfx::blur::type::Zoom;
}
std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::gaussian_zoom::render()
{
auto gctx = streamfx::obs::gs::context();
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
auto gdmp = streamfx::obs::gs::debug_marker(streamfx::obs::gs::debug_color_azure_radiance, "Gaussian Zoom Blur");
#endif
streamfx::obs::gs::effect effect = _data->get_effect();
auto kernel = _data->get_kernel(size_t(_size));
if (!effect || ((_step_scale.first + _step_scale.second) < std::numeric_limits<double_t>::epsilon())) {
return _input_texture;
}
float width = float(_input_texture->width());
float height = float(_input_texture->height());
// Setup
gs_set_cull_mode(GS_NEITHER);
gs_enable_color(true, true, true, true);
gs_enable_depth_test(false);
gs_depth_function(GS_ALWAYS);
gs_blend_state_push();
gs_reset_blend_state();
gs_enable_blending(false);
gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
gs_enable_stencil_test(false);
gs_enable_stencil_write(false);
gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);
effect.get_parameter("pImage").set_texture(_input_texture);
effect.get_parameter("pImageTexel").set_float2(float(1.f / width), float(1.f / height));
effect.get_parameter("pStepScale").set_float2(float(_step_scale.first), float(_step_scale.second));
effect.get_parameter("pSize").set_float(float(_size));
effect.get_parameter("pCenter").set_float2(float(m_center.first), float(m_center.second));
effect.get_parameter("pKernel").set_value(kernel.data(), ST_KERNEL_SIZE);
// First Pass
{
auto op = _rendertarget->render(uint32_t(width), uint32_t(height));
gs_ortho(0, 1., 0, 1., 0, 1.);
while (gs_effect_loop(effect.get_object(), "Zoom")) {
_data->get_gfx_util()->draw_fullscreen_triangle();
}
}
gs_blend_state_pop();
return this->get();
}
void streamfx::gfx::blur::gaussian_zoom::set_center(double_t x, double_t y)
{
	// Center of the zoom, in normalized texture coordinates per the caller.
	m_center = {x, y};
}

void streamfx::gfx::blur::gaussian_zoom::get_center(double_t& x, double_t& y)
{
	y = m_center.second;
	x = m_center.first;
}

View File

@ -0,0 +1,160 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx-blur-base.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <map>
#include <mutex>
#include <vector>
#include "warning-enable.hpp"
namespace streamfx::gfx {
	namespace blur {
		// Shared, lazily-created state for all Gaussian blur instances: the
		// compiled effect, common drawing utilities, and pre-computed kernels.
		class gaussian_data {
			streamfx::obs::gs::effect            _effect;
			std::shared_ptr<streamfx::gfx::util> _gfx_util;
			std::map<size_t, std::vector<float>> _kernels; // Blur size -> pre-computed kernel weights.

			public:
			gaussian_data();
			virtual ~gaussian_data();

			// Effect used to render all Gaussian blur variants.
			streamfx::obs::gs::effect get_effect();

			// Shared drawing helpers (e.g. fullscreen triangle).
			std::shared_ptr<streamfx::gfx::util> get_gfx_util();

			// Pre-computed kernel for the given blur width; width is clamped to
			// the supported range.
			std::vector<float> const& get_kernel(std::size_t width);
		};

		// Factory that creates Gaussian blur instances and reports the
		// parameter limits of each blur variant.
		class gaussian_factory : public ::streamfx::gfx::blur::ifactory {
			std::mutex                                          _data_lock;
			std::weak_ptr<::streamfx::gfx::blur::gaussian_data> _data;

			public:
			gaussian_factory();
			virtual ~gaussian_factory() override;

			virtual bool is_type_supported(::streamfx::gfx::blur::type type) override;

			virtual std::shared_ptr<::streamfx::gfx::blur::base> create(::streamfx::gfx::blur::type type) override;

			// Size limits (blur width in pixels).
			virtual double_t get_min_size(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_size(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_size(::streamfx::gfx::blur::type type) override;

			// Angle limits (degrees), for variants that have an angle.
			virtual double_t get_min_angle(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_angle(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_angle(::streamfx::gfx::blur::type type) override;

			// Step-scale limits (per-axis sampling scale).
			virtual bool is_step_scale_supported(::streamfx::gfx::blur::type type) override;

			virtual double_t get_min_step_scale_x(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_step_scale_x(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_step_scale_x(::streamfx::gfx::blur::type type) override;

			virtual double_t get_min_step_scale_y(::streamfx::gfx::blur::type type) override;
			virtual double_t get_step_step_scale_y(::streamfx::gfx::blur::type type) override;
			virtual double_t get_max_step_scale_y(::streamfx::gfx::blur::type type) override;

			// Shared data instance, created on first use and kept alive by the
			// blur instances that reference it.
			std::shared_ptr<::streamfx::gfx::blur::gaussian_data> data();

			public: // Singleton
			static ::streamfx::gfx::blur::gaussian_factory& get();
		};

		// Two-pass (separable) area Gaussian blur; base class for the
		// directional, rotational and zoom variants.
		class gaussian : public ::streamfx::gfx::blur::base {
			protected:
			std::shared_ptr<::streamfx::gfx::blur::gaussian_data> _data;
			double_t                                              _size;       // Blur width in pixels.
			std::pair<double_t, double_t>                         _step_scale; // Per-axis sampling scale.
			std::shared_ptr<::streamfx::obs::gs::texture>         _input_texture;
			std::shared_ptr<::streamfx::obs::gs::texrender>       _rendertarget;

			private:
			std::shared_ptr<::streamfx::obs::gs::texrender> _rendertarget2; // Second target for ping-pong rendering.

			public:
			gaussian();
			virtual ~gaussian() override;

			virtual void set_input(std::shared_ptr<::streamfx::obs::gs::texture> texture) override;

			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual double_t get_size() override;

			virtual void set_size(double_t width) override;

			virtual void set_step_scale(double_t x, double_t y) override;

			virtual void get_step_scale(double_t& x, double_t& y) override;

			virtual double_t get_step_scale_x() override;

			virtual double_t get_step_scale_y() override;

			// Runs the blur and returns the result; returns the unmodified input
			// if rendering is not possible.
			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;

			// Result of the most recent render() call.
			virtual std::shared_ptr<::streamfx::obs::gs::texture> get() override;
		};

		// Directional (single-axis, angled) variant of the Gaussian blur.
		class gaussian_directional : public ::streamfx::gfx::blur::gaussian, public ::streamfx::gfx::blur::base_angle {
			double_t m_angle; // Blur direction; stored in radians (converted in set_angle).

			public:
			gaussian_directional();
			virtual ~gaussian_directional() override;

			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual double_t get_angle() override;

			virtual void set_angle(double_t angle) override;

			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
		};

		// Rotational variant: blurs along an arc around a configurable center.
		class gaussian_rotational : public ::streamfx::gfx::blur::gaussian, public ::streamfx::gfx::blur::base_angle, public ::streamfx::gfx::blur::base_center {
			std::pair<double_t, double_t> m_center; // Center of rotation.
			double_t                      m_angle;  // Arc angle; stored in radians (converted in set_angle).

			public:
			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual void set_center(double_t x, double_t y) override;

			virtual void get_center(double_t& x, double_t& y) override;

			virtual double_t get_angle() override;

			virtual void set_angle(double_t angle) override;

			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
		};

		// Zoom variant: blurs radially away from a configurable center.
		class gaussian_zoom : public ::streamfx::gfx::blur::gaussian, public ::streamfx::gfx::blur::base_center {
			std::pair<double_t, double_t> m_center; // Center of the zoom.

			public:
			virtual ::streamfx::gfx::blur::type get_type() override;

			virtual void set_center(double_t x, double_t y) override;

			virtual void get_center(double_t& x, double_t& y) override;

			virtual std::shared_ptr<::streamfx::obs::gs::texture> render() override;
		};
	} // namespace blur
} // namespace streamfx::gfx

View File

@ -0,0 +1,9 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END

cmake_minimum_required(VERSION 3.26)
project("ColorGrade")
# Prefix all CMake log output from this component for easier build-log reading.
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

# Register this directory as the StreamFX component named "Color Grade".
streamfx_add_component("Color Grade")

View File

@ -0,0 +1,855 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "filter-color-grade.hpp"
#include "strings.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-helper.hpp"
#include "util/util-logging.hpp"
#include "warning-disable.hpp"
#include <stdexcept>
#include "warning-enable.hpp"
// OBS
#include "warning-disable.hpp"
extern "C" {
#include <graphics/graphics.h>
#include <graphics/matrix4.h>
#include <util/platform.h>
}
#include "warning-enable.hpp"
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
#define ST_PREFIX "<filter::color_grade> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
#define ST_I18N "Filter.ColorGrade"
// Lift
#define ST_KEY_LIFT "Filter.ColorGrade.Lift"
#define ST_I18N_LIFT ST_I18N ".Lift"
#define ST_KEY_LIFT_(x) ST_KEY_LIFT "." x
#define ST_I18N_LIFT_(x) ST_I18N_LIFT "." x
// Gamma
#define ST_KEY_GAMMA "Filter.ColorGrade.Gamma"
#define ST_I18N_GAMMA ST_I18N ".Gamma"
#define ST_KEY_GAMMA_(x) ST_KEY_GAMMA "." x
#define ST_I18N_GAMMA_(x) ST_I18N_GAMMA "." x
// Gain
#define ST_KEY_GAIN "Filter.ColorGrade.Gain"
#define ST_I18N_GAIN ST_I18N ".Gain"
#define ST_KEY_GAIN_(x) ST_KEY_GAIN "." x
#define ST_I18N_GAIN_(x) ST_I18N_GAIN "." x
// Offset
#define ST_KEY_OFFSET "Filter.ColorGrade.Offset"
#define ST_I18N_OFFSET ST_I18N ".Offset"
#define ST_KEY_OFFSET_(x) ST_KEY_OFFSET "." x
#define ST_I18N_OFFSET_(x) ST_I18N_OFFSET "." x
// Tint
#define ST_KEY_TINT "Filter.ColorGrade.Tint"
#define ST_I18N_TINT ST_I18N ".Tint"
#define ST_KEY_TINT_DETECTION ST_KEY_TINT ".Detection"
#define ST_I18N_TINT_DETECTION ST_I18N_TINT ".Detection"
#define ST_I18N_TINT_DETECTION_(x) ST_I18N_TINT_DETECTION "." x
#define ST_KEY_TINT_MODE ST_KEY_TINT ".Mode"
#define ST_I18N_TINT_MODE ST_I18N_TINT ".Mode"
#define ST_I18N_TINT_MODE_(x) ST_I18N_TINT_MODE "." x
#define ST_KEY_TINT_EXPONENT ST_KEY_TINT ".Exponent"
#define ST_I18N_TINT_EXPONENT ST_I18N_TINT ".Exponent"
#define ST_KEY_TINT_(x, y) ST_KEY_TINT "." x "." y
#define ST_I18N_TINT_(x, y) ST_I18N_TINT "." x "." y
// Color Correction
#define ST_KEY_CORRECTION "Filter.ColorGrade.Correction"
#define ST_KEY_CORRECTION_(x) ST_KEY_CORRECTION "." x
#define ST_I18N_CORRECTION ST_I18N ".Correction"
#define ST_I18N_CORRECTION_(x) ST_I18N_CORRECTION "." x
// Render Mode
#define ST_KEY_RENDERMODE "Filter.ColorGrade.RenderMode"
#define ST_I18N_RENDERMODE ST_I18N ".RenderMode"
#define ST_I18N_RENDERMODE_DIRECT ST_I18N_RENDERMODE ".Direct"
#define ST_I18N_RENDERMODE_LUT_2BIT ST_I18N_RENDERMODE ".LUT.2Bit"
#define ST_I18N_RENDERMODE_LUT_4BIT ST_I18N_RENDERMODE ".LUT.4Bit"
#define ST_I18N_RENDERMODE_LUT_6BIT ST_I18N_RENDERMODE ".LUT.6Bit"
#define ST_I18N_RENDERMODE_LUT_8BIT ST_I18N_RENDERMODE ".LUT.8Bit"
#define ST_I18N_RENDERMODE_LUT_10BIT ST_I18N_RENDERMODE ".LUT.10Bit"
#define ST_RED "Red"
#define ST_GREEN "Green"
#define ST_BLUE "Blue"
#define ST_ALL "All"
#define ST_HUE "Hue"
#define ST_SATURATION "Saturation"
#define ST_LIGHTNESS "Lightness"
#define ST_CONTRAST "Contrast"
#define ST_TONE_LOW "Shadow"
#define ST_TONE_MID "Midtone"
#define ST_TONE_HIGH "Highlight"
#define ST_DETECTION_HSV "HSV"
#define ST_DETECTION_HSL "HSL"
#define ST_DETECTION_YUV_SDR "YUV.SDR"
#define ST_MODE_LINEAR "Linear"
#define ST_MODE_EXP "Exp"
#define ST_MODE_EXP2 "Exp2"
#define ST_MODE_LOG "Log"
#define ST_MODE_LOG10 "Log10"
using namespace streamfx::filter::color_grade;
static constexpr std::string_view HELP_URL = "https://github.com/Xaymar/obs-StreamFX/wiki/Filter-Color-Grade";
// TODO: Figure out a way to merge _lut_rt, _lut_texture, _rt_source, _rt_grad, _tex_source, _tex_grade, _source_updated and _grade_updated.
// Seriously this is too much GPU space wasted on unused trash.
// All members release their resources via RAII; nothing to do manually.
color_grade_instance::~color_grade_instance() = default;
// Sets up a filter instance: loads the color grading effect, attempts to set
// up the LUT-based render path (optional — falls back to direct rendering on
// failure), allocates the output cache render target, then applies the initial
// settings. Throws if the effect or the render target cannot be created.
color_grade_instance::color_grade_instance(obs_data_t* data, obs_source_t* self)
	: obs::source_instance(data, self), _effect(), _gfx_util(::streamfx::gfx::util::get()), _lift(), _gamma(), _gain(), _offset(), _tint_detection(), _tint_luma(), _tint_exponent(), _tint_low(), _tint_mid(), _tint_hig(), _correction(), _lut_enabled(true), _lut_depth(), _ccache_rt(), _ccache_texture(), _ccache_fresh(false), _lut_initialized(false), _lut_dirty(true), _lut_producer(), _lut_consumer(), _lut_rt(), _lut_texture(), _cache_rt(), _cache_texture(), _cache_fresh(false)
{
	{
		// Hold the graphics context for all GPU work below.
		auto gctx = streamfx::obs::gs::context();

		// Load the color grading effect.
		{
			auto file = streamfx::data_file_path("effects/color-grade.effect");
			try {
				_effect = streamfx::obs::gs::effect::create(file);
			} catch (std::exception& ex) {
				D_LOG_ERROR("Error loading '%s': %s", file.u8string().c_str(), ex.what());
				// Without the effect the filter cannot work at all.
				throw;
			}
		}

		// Initialize LUT work flow.
		try {
			_lut_producer    = std::make_shared<streamfx::gfx::lut::producer>();
			_lut_consumer    = std::make_shared<streamfx::gfx::lut::consumer>();
			_lut_initialized = true;
		} catch (std::exception const& ex) {
			// LUT support is optional; direct rendering remains available.
			D_LOG_WARNING("Failed to initialize LUT rendering, falling back to direct rendering.\n%s", ex.what());
			_lut_initialized = false;
		}

		// Allocate render target for rendering.
		try {
			allocate_rendertarget(GS_RGBA);
		} catch (std::exception const& ex) {
			D_LOG_ERROR("Failed to acquire render target for rendering: %s", ex.what());
			throw;
		}
	}

	update(data);
}
// (Re-)allocates the output cache render target with the given color format.
// _cache_rt is declared as std::shared_ptr, so construct it with make_shared:
// this performs a single allocation, instead of the separate control-block
// allocation incurred by converting from the result of std::make_unique.
void color_grade_instance::allocate_rendertarget(gs_color_format format)
{
	_cache_rt = std::make_shared<streamfx::obs::gs::texrender>(format, GS_ZS_NONE);
}
// Maps a user-facing gamma slider value onto the exponent actually used by the
// shader: negative input maps to exponents above 1, non-negative input maps to
// exponents in (0, 1], and 0 maps to exactly 1.0 (identity).
float fix_gamma_value(double_t v)
{
	return static_cast<float>((v < 0.0) ? (-v + 1.0) : (1.0 / (v + 1.0)));
}
// Called by OBS when restoring saved settings; identical to a live update.
void color_grade_instance::load(obs_data_t* data)
{
	update(data);
}
// No settings migrations between versions are required yet.
void color_grade_instance::migrate(obs_data_t* data, uint64_t version) {}
// Reads the user's settings into the instance. Percentage sliders are stored
// as percent and converted to plain factors here (/ 100.0); hue is converted
// from degrees to a turn fraction (/ 360.0). Marks the LUT dirty when the LUT
// render path is active so it is rebuilt on the next frame.
void color_grade_instance::update(obs_data_t* data)
{
	// Lift (per channel and overall).
	_lift.x = static_cast<float>(obs_data_get_double(data, ST_KEY_LIFT_(ST_RED)) / 100.0);
	_lift.y = static_cast<float>(obs_data_get_double(data, ST_KEY_LIFT_(ST_GREEN)) / 100.0);
	_lift.z = static_cast<float>(obs_data_get_double(data, ST_KEY_LIFT_(ST_BLUE)) / 100.0);
	_lift.w = static_cast<float>(obs_data_get_double(data, ST_KEY_LIFT_(ST_ALL)) / 100.0);

	// Gamma, remapped through fix_gamma_value into the shader's exponent form.
	_gamma.x = fix_gamma_value(obs_data_get_double(data, ST_KEY_GAMMA_(ST_RED)) / 100.0);
	_gamma.y = fix_gamma_value(obs_data_get_double(data, ST_KEY_GAMMA_(ST_GREEN)) / 100.0);
	_gamma.z = fix_gamma_value(obs_data_get_double(data, ST_KEY_GAMMA_(ST_BLUE)) / 100.0);
	_gamma.w = fix_gamma_value(obs_data_get_double(data, ST_KEY_GAMMA_(ST_ALL)) / 100.0);

	// Gain (per channel and overall).
	_gain.x = static_cast<float>(obs_data_get_double(data, ST_KEY_GAIN_(ST_RED)) / 100.0);
	_gain.y = static_cast<float>(obs_data_get_double(data, ST_KEY_GAIN_(ST_GREEN)) / 100.0);
	_gain.z = static_cast<float>(obs_data_get_double(data, ST_KEY_GAIN_(ST_BLUE)) / 100.0);
	_gain.w = static_cast<float>(obs_data_get_double(data, ST_KEY_GAIN_(ST_ALL)) / 100.0);

	// Offset (per channel and overall).
	_offset.x = static_cast<float>(obs_data_get_double(data, ST_KEY_OFFSET_(ST_RED)) / 100.0);
	_offset.y = static_cast<float>(obs_data_get_double(data, ST_KEY_OFFSET_(ST_GREEN)) / 100.0);
	_offset.z = static_cast<float>(obs_data_get_double(data, ST_KEY_OFFSET_(ST_BLUE)) / 100.0);
	_offset.w = static_cast<float>(obs_data_get_double(data, ST_KEY_OFFSET_(ST_ALL)) / 100.0);

	// Tint: tone detection/luma math, and per-tone (shadow/midtone/highlight) RGB factors.
	_tint_detection = static_cast<detection_mode>(obs_data_get_int(data, ST_KEY_TINT_DETECTION));
	_tint_luma      = static_cast<luma_mode>(obs_data_get_int(data, ST_KEY_TINT_MODE));
	_tint_exponent  = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_EXPONENT));
	_tint_low.x     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_RED)) / 100.0);
	_tint_low.y     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_GREEN)) / 100.0);
	_tint_low.z     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_BLUE)) / 100.0);
	_tint_mid.x     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_RED)) / 100.0);
	_tint_mid.y     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_GREEN)) / 100.0);
	_tint_mid.z     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_BLUE)) / 100.0);
	_tint_hig.x     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_RED)) / 100.0);
	_tint_hig.y     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_GREEN)) / 100.0);
	_tint_hig.z     = static_cast<float>(obs_data_get_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_BLUE)) / 100.0);

	// Color correction: hue (turn fraction), saturation, lightness, contrast.
	_correction.x = static_cast<float>(obs_data_get_double(data, ST_KEY_CORRECTION_(ST_HUE)) / 360.0);
	_correction.y = static_cast<float>(obs_data_get_double(data, ST_KEY_CORRECTION_(ST_SATURATION)) / 100.0);
	_correction.z = static_cast<float>(obs_data_get_double(data, ST_KEY_CORRECTION_(ST_LIGHTNESS)) / 100.0);
	_correction.w = static_cast<float>(obs_data_get_double(data, ST_KEY_CORRECTION_(ST_CONTRAST)) / 100.0);

	{
		// Render mode: -1 = Automatic, 0 = Direct, > 0 = explicit LUT depth.
		int64_t v = obs_data_get_int(data, ST_KEY_RENDERMODE);

		// LUT status depends on selected option.
		_lut_enabled = v != 0; // 0 (Direct)
		if (v == -1) {
			// Automatic currently means an 8-bit LUT.
			_lut_depth = streamfx::gfx::lut::color_depth::_8;
		} else if (v > 0) {
			_lut_depth = static_cast<streamfx::gfx::lut::color_depth>(v);
		}
	}

	if (_lut_enabled && _lut_initialized)
		_lut_dirty = true;
}
// Pushes the current user configuration into the color grading effect's
// parameters. Each parameter is looked up by name and silently skipped if the
// effect file does not declare it.
// Fix: the original uploaded "pLift" twice (a duplicated if-block); the
// redundant second upload has been removed — behavior is unchanged.
void color_grade_instance::prepare_effect()
{
	if (auto p = _effect.get_parameter("pLift"); p) {
		p.set_float4(_lift);
	}
	if (auto p = _effect.get_parameter("pGamma"); p) {
		p.set_float4(_gamma);
	}
	if (auto p = _effect.get_parameter("pGain"); p) {
		p.set_float4(_gain);
	}
	if (auto p = _effect.get_parameter("pOffset"); p) {
		p.set_float4(_offset);
	}
	if (auto p = _effect.get_parameter("pTintDetection"); p) {
		p.set_int(static_cast<int32_t>(_tint_detection));
	}
	if (auto p = _effect.get_parameter("pTintMode"); p) {
		p.set_int(static_cast<int32_t>(_tint_luma));
	}
	if (auto p = _effect.get_parameter("pTintExponent"); p) {
		p.set_float(_tint_exponent);
	}
	if (auto p = _effect.get_parameter("pTintLow"); p) {
		p.set_float3(_tint_low);
	}
	if (auto p = _effect.get_parameter("pTintMid"); p) {
		p.set_float3(_tint_mid);
	}
	if (auto p = _effect.get_parameter("pTintHig"); p) {
		p.set_float3(_tint_hig);
	}
	if (auto p = _effect.get_parameter("pCorrection"); p) {
		p.set_float4(_correction);
	}
}
// Regenerates the grading LUT: produces a neutral LUT texture at the current
// depth, then renders the color grade effect over it, so subsequent frames
// only need a cheap LUT lookup instead of the full grade math. Throws on
// failure; the caller reverts to direct rendering.
void color_grade_instance::rebuild_lut()
{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_cache, "Rebuild LUT"};
#endif

	// Generate a fresh LUT texture.
	auto lut_texture = _lut_producer->produce(_lut_depth);

	// Modify the LUT with our color grade.
	if (lut_texture) {
		// Check if we have a render target to work with and if it's the correct format.
		if (!_lut_rt || (lut_texture->color_format() != _lut_rt->color_format())) {
			// Create a new render target with new format.
			_lut_rt = std::make_unique<streamfx::obs::gs::texrender>(lut_texture->color_format(), GS_ZS_NONE);
		}

		// Prepare our color grade effect.
		prepare_effect();

		// Assign texture.
		if (auto p = _effect.get_parameter("image"); p) {
			p.set_texture(lut_texture);
		}

		{ // Begin rendering. The texrender op ends when 'op' leaves scope.
			auto op = _lut_rt->render(lut_texture->width(), lut_texture->height());

			// Set up graphics context.
			gs_ortho(0, 1, 0, 1, 0, 1);
			gs_blend_state_push();
			gs_enable_blending(false);
			gs_enable_color(true, true, true, true);
			gs_enable_stencil_test(false);
			gs_enable_stencil_write(false);

			while (gs_effect_loop(_effect.get_object(), "Draw")) {
				_gfx_util->draw_fullscreen_triangle();
			}

			gs_blend_state_pop();
		}

		_lut_rt->get_texture(_lut_texture);
		if (!_lut_texture) {
			throw std::runtime_error("Failed to produce modified LUT texture.");
		}
	} else {
		throw std::runtime_error("Failed to produce LUT texture.");
	}

	_lut_dirty = false;
}
// Per-frame tick: invalidate both the input capture cache and the graded
// output cache so the next video_render() re-captures and re-grades.
void color_grade_instance::video_tick(float)
{
	_ccache_fresh = false;
	_cache_fresh  = false;
}
// Renders the filter. Three stages:
//   1. Capture the source/filter chain above this filter into _ccache_texture.
//   2. Apply the grade — via the pre-baked LUT when enabled (falling back to
//      direct rendering on any LUT error), else directly with the full effect.
//   3. Draw the graded cache with the effect OBS handed us.
// NOTE(review): this function throws std::runtime_error on cache failures,
// which propagates into OBS' graphics loop — confirm this is intentional.
void color_grade_instance::video_render(gs_effect_t* shader)
{
	// Grab initial values.
	obs_source_t* parent = obs_filter_get_parent(_self);
	obs_source_t* target = obs_filter_get_target(_self);
	uint32_t      width  = obs_source_get_base_width(target);
	uint32_t      height = obs_source_get_base_height(target);
	vec4          blank  = vec4{0, 0, 0, 0};
	shader               = shader ? shader : obs_get_base_effect(OBS_EFFECT_DEFAULT);

	// Skip filter if anything is wrong.
	if (!parent || !target || !width || !height) {
		obs_source_skip_video_filter(_self);
		return;
	}

#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	streamfx::obs::gs::debug_marker gdmp{streamfx::obs::gs::debug_color_source, "Color Grading '%s'", obs_source_get_name(_self)};
#endif

	// TODO: Optimize this once (https://github.com/obsproject/obs-studio/pull/4199) is merged.
	// - We can skip the original capture and reduce the overall impact of this.

	// 1. Capture the filter/source rendered above this.
	if (!_ccache_fresh || !_ccache_texture) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdmp{streamfx::obs::gs::debug_color_cache, "Cache '%s'", obs_source_get_name(target)};
#endif
		// If the input cache render target doesn't exist, create it.
		if (!_ccache_rt) {
			_ccache_rt = std::make_shared<streamfx::obs::gs::texrender>(GS_RGBA, GS_ZS_NONE);
		}

		{
			// Render op ends when 'op' leaves scope.
			auto op = _ccache_rt->render(width, height);
			gs_ortho(0, static_cast<float>(width), 0, static_cast<float>(height), 0, 1);

			// Blank out the input cache.
			gs_clear(GS_CLEAR_COLOR | GS_CLEAR_DEPTH, &blank, 0., 0);

			// Begin rendering the actual input source.
			obs_source_process_filter_begin(_self, GS_RGBA, OBS_ALLOW_DIRECT_RENDERING);

			// Enable all colors for rendering.
			gs_enable_color(true, true, true, true);

			// Prevent blending with existing content, even if it is cleared.
			gs_blend_state_push();
			gs_enable_blending(false);
			gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);

			// Disable depth testing.
			gs_enable_depth_test(false);

			// Disable stencil testing.
			gs_enable_stencil_test(false);

			// Disable culling.
			gs_set_cull_mode(GS_NEITHER);

			// End rendering the actual input source.
			obs_source_process_filter_end(_self, obs_get_base_effect(OBS_EFFECT_DEFAULT), width, height);

			// Restore original blend mode.
			gs_blend_state_pop();
		}

		// Try and retrieve the input cache as a texture for later use.
		_ccache_rt->get_texture(_ccache_texture);
		if (!_ccache_texture) {
			throw std::runtime_error("Failed to cache original source.");
		}

		// Mark the input cache as valid.
		_ccache_fresh = true;
	}

	// 2. Apply one of the two rendering methods (LUT or Direct).
	if (_lut_initialized && _lut_enabled) { // Try to apply with the LUT based method.
		try {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_convert, "LUT Rendering"};
#endif
			// If the LUT was changed, rebuild the LUT first.
			if (_lut_dirty) {
				rebuild_lut();

				// Mark the cache as invalid, since the LUT has been changed.
				_cache_fresh = false;
			}

			// Reallocate the rendertarget if necessary.
			if (_cache_rt->color_format() != GS_RGBA) {
				allocate_rendertarget(GS_RGBA);
			}

			if (!_cache_fresh) {
				{ // Render the source to the cache.
					auto op = _cache_rt->render(width, height);
					gs_ortho(0, 1., 0, 1., 0, 1);

					// Blank out the input cache.
					gs_clear(GS_CLEAR_COLOR | GS_CLEAR_DEPTH, &blank, 0., 0);

					// Enable all colors for rendering.
					gs_enable_color(true, true, true, true);

					// Prevent blending with existing content, even if it is cleared.
					gs_blend_state_push();
					gs_enable_blending(false);
					gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);

					// Disable depth testing.
					gs_enable_depth_test(false);

					// Disable stencil testing.
					gs_enable_stencil_test(false);

					// Disable culling.
					gs_set_cull_mode(GS_NEITHER);

					// Apply the baked LUT to the captured input.
					auto effect = _lut_consumer->prepare(_lut_depth, _lut_texture);
					effect->get_parameter("image").set_texture(_ccache_texture);
					while (gs_effect_loop(effect->get_object(), "Draw")) {
						_gfx_util->draw_fullscreen_triangle();
					}

					// Restore original blend mode.
					gs_blend_state_pop();
				}

				// Try and retrieve the render cache as a texture.
				_cache_rt->get_texture(_cache_texture);

				// Mark the render cache as valid.
				_cache_fresh = true;
			}
		} catch (std::exception const& ex) {
			// If anything happened, revert to direct rendering.
			_lut_rt.reset();
			_lut_texture.reset();
			_lut_enabled = false;
			D_LOG_WARNING("Reverting to direct rendering due to error: %s", ex.what());
		}
	}

	// Direct path — also reached as the fallback when the LUT path failed above.
	if ((!_lut_initialized || !_lut_enabled) && !_cache_fresh) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_convert, "Direct Rendering"};
#endif
		// Reallocate the rendertarget if necessary.
		if (_cache_rt->color_format() != GS_RGBA) {
			allocate_rendertarget(GS_RGBA);
		}

		{ // Render the source to the cache.
			auto op = _cache_rt->render(width, height);
			gs_ortho(0, 1, 0, 1, 0, 1);
			prepare_effect();

			// Blank out the input cache.
			gs_clear(GS_CLEAR_COLOR | GS_CLEAR_DEPTH, &blank, 0., 0);

			// Enable all colors for rendering.
			gs_enable_color(true, true, true, true);

			// Prevent blending with existing content, even if it is cleared.
			gs_blend_state_push();
			gs_enable_blending(false);
			gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);

			// Disable depth testing.
			gs_enable_depth_test(false);

			// Disable stencil testing.
			gs_enable_stencil_test(false);

			// Disable culling.
			gs_set_cull_mode(GS_NEITHER);

			// Render the effect.
			_effect.get_parameter("image").set_texture(_ccache_texture);
			while (gs_effect_loop(_effect.get_object(), "Draw")) {
				_gfx_util->draw_fullscreen_triangle();
			}

			// Restore original blend mode.
			gs_blend_state_pop();
		}

		// Try and retrieve the render cache as a texture.
		_cache_rt->get_texture(_cache_texture);

		// Mark the render cache as valid.
		_cache_fresh = true;
	}

	if (!_cache_texture) {
		throw std::runtime_error("Failed to cache processed source.");
	}

	// 3. Render the output cache.
	{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_cache_render, "Draw Cache"};
#endif
		// Revert GPU status to what OBS Studio expects.
		gs_enable_depth_test(false);
		gs_enable_color(true, true, true, true);
		gs_set_cull_mode(GS_NEITHER);

		// Draw the render cache.
		while (gs_effect_loop(shader, "Draw")) {
			gs_effect_set_texture(gs_effect_get_param_by_name(shader, "image"), _cache_texture ? *_cache_texture : nullptr);
			gs_draw_sprite(nullptr, 0, width, height);
		}
	}
}
// Registers the Color Grade filter source type with OBS Studio.
color_grade_factory::color_grade_factory()
{
	_info.id           = S_PREFIX "filter-color-grade";
	_info.type         = OBS_SOURCE_TYPE_FILTER;
	_info.output_flags = OBS_SOURCE_VIDEO;

	// Filter output tracks its input size; no custom size support.
	support_size(false);
	finish_setup();
	// Compatibility alias from the plugin's previous name.
	register_proxy("obs-stream-effects-filter-color-grade");
}
// Nothing to release manually; base class and members clean up via RAII.
color_grade_factory::~color_grade_factory() = default;
// Localized display name shown in the OBS filter list.
const char* color_grade_factory::get_name()
{
	return D_TRANSLATE(ST_I18N);
}
// Neutral defaults: lift/gamma/offset at 0, gain/tint/saturation/lightness/
// contrast at 100 %, hue at 0°, render mode -1 (Automatic).
void color_grade_factory::get_defaults2(obs_data_t* data)
{
	// Lift.
	obs_data_set_default_double(data, ST_KEY_LIFT_(ST_RED), 0);
	obs_data_set_default_double(data, ST_KEY_LIFT_(ST_GREEN), 0);
	obs_data_set_default_double(data, ST_KEY_LIFT_(ST_BLUE), 0);
	obs_data_set_default_double(data, ST_KEY_LIFT_(ST_ALL), 0);

	// Gamma.
	obs_data_set_default_double(data, ST_KEY_GAMMA_(ST_RED), 0);
	obs_data_set_default_double(data, ST_KEY_GAMMA_(ST_GREEN), 0);
	obs_data_set_default_double(data, ST_KEY_GAMMA_(ST_BLUE), 0);
	obs_data_set_default_double(data, ST_KEY_GAMMA_(ST_ALL), 0);

	// Gain.
	obs_data_set_default_double(data, ST_KEY_GAIN_(ST_RED), 100.0);
	obs_data_set_default_double(data, ST_KEY_GAIN_(ST_GREEN), 100.0);
	obs_data_set_default_double(data, ST_KEY_GAIN_(ST_BLUE), 100.0);
	obs_data_set_default_double(data, ST_KEY_GAIN_(ST_ALL), 100.0);

	// Offset.
	obs_data_set_default_double(data, ST_KEY_OFFSET_(ST_RED), 0.0);
	obs_data_set_default_double(data, ST_KEY_OFFSET_(ST_GREEN), 0.0);
	obs_data_set_default_double(data, ST_KEY_OFFSET_(ST_BLUE), 0.0);
	obs_data_set_default_double(data, ST_KEY_OFFSET_(ST_ALL), 0.0);

	// Tint.
	obs_data_set_default_int(data, ST_KEY_TINT_MODE, static_cast<int64_t>(luma_mode::Linear));
	obs_data_set_default_int(data, ST_KEY_TINT_DETECTION, static_cast<int64_t>(detection_mode::YUV_SDR));
	obs_data_set_default_double(data, ST_KEY_TINT_EXPONENT, 1.5);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_RED), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_GREEN), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_LOW, ST_BLUE), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_RED), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_GREEN), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_MID, ST_BLUE), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_RED), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_GREEN), 100.0);
	obs_data_set_default_double(data, ST_KEY_TINT_(ST_TONE_HIGH, ST_BLUE), 100.0);

	// Color correction.
	obs_data_set_default_double(data, ST_KEY_CORRECTION_(ST_HUE), 0.0);
	obs_data_set_default_double(data, ST_KEY_CORRECTION_(ST_SATURATION), 100.0);
	obs_data_set_default_double(data, ST_KEY_CORRECTION_(ST_LIGHTNESS), 100.0);
	obs_data_set_default_double(data, ST_KEY_CORRECTION_(ST_CONTRAST), 100.0);

	// Render mode (-1 = Automatic).
	obs_data_set_default_int(data, ST_KEY_RENDERMODE, -1);
}
// Builds the OBS property UI: a manual button, grouped percentage sliders for
// Lift/Gamma/Gain/Offset, per-tone Tint sliders, Color Correction sliders, and
// an Advanced group with the tint math options and the render mode selection.
obs_properties_t* color_grade_factory::get_properties2(color_grade_instance* data)
{
	obs_properties_t* pr = obs_properties_create();

	{
		// "Open manual" button.
		obs_properties_add_button2(pr, S_MANUAL_OPEN, D_TRANSLATE(S_MANUAL_OPEN), streamfx::filter::color_grade::color_grade_factory::on_manual_open, nullptr);
	}

	{ // Lift group.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_LIFT, D_TRANSLATE(ST_I18N_LIFT), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_LIFT_(ST_RED), D_TRANSLATE(ST_I18N_LIFT_(ST_RED)), -1000., 100., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_LIFT_(ST_GREEN), D_TRANSLATE(ST_I18N_LIFT_(ST_GREEN)), -1000., 100., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_LIFT_(ST_BLUE), D_TRANSLATE(ST_I18N_LIFT_(ST_BLUE)), -1000., 100., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_LIFT_(ST_ALL), D_TRANSLATE(ST_I18N_LIFT_(ST_ALL)), -1000., 100., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Gamma group.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_GAMMA, D_TRANSLATE(ST_I18N_GAMMA), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAMMA_(ST_RED), D_TRANSLATE(ST_I18N_GAMMA_(ST_RED)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAMMA_(ST_GREEN), D_TRANSLATE(ST_I18N_GAMMA_(ST_GREEN)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAMMA_(ST_BLUE), D_TRANSLATE(ST_I18N_GAMMA_(ST_BLUE)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAMMA_(ST_ALL), D_TRANSLATE(ST_I18N_GAMMA_(ST_ALL)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Gain group.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_GAIN, D_TRANSLATE(ST_I18N_GAIN), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAIN_(ST_RED), D_TRANSLATE(ST_I18N_GAIN_(ST_RED)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAIN_(ST_GREEN), D_TRANSLATE(ST_I18N_GAIN_(ST_GREEN)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAIN_(ST_BLUE), D_TRANSLATE(ST_I18N_GAIN_(ST_BLUE)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_GAIN_(ST_ALL), D_TRANSLATE(ST_I18N_GAIN_(ST_ALL)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Offset group.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_OFFSET, D_TRANSLATE(ST_I18N_OFFSET), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_OFFSET_(ST_RED), D_TRANSLATE(ST_I18N_OFFSET_(ST_RED)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_OFFSET_(ST_GREEN), D_TRANSLATE(ST_I18N_OFFSET_(ST_GREEN)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_OFFSET_(ST_BLUE), D_TRANSLATE(ST_I18N_OFFSET_(ST_BLUE)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_OFFSET_(ST_ALL), D_TRANSLATE(ST_I18N_OFFSET_(ST_ALL)), -1000., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Tint group: per-tone (shadow/midtone/highlight) RGB sliders.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_TINT, D_TRANSLATE(ST_I18N_TINT), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_LOW, ST_RED), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_LOW, ST_RED)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_LOW, ST_GREEN), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_LOW, ST_GREEN)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_LOW, ST_BLUE), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_LOW, ST_BLUE)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_MID, ST_RED), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_MID, ST_RED)), 0, 1000., 0.01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_MID, ST_GREEN), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_MID, ST_GREEN)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_MID, ST_BLUE), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_MID, ST_BLUE)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_HIGH, ST_RED), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_HIGH, ST_RED)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_HIGH, ST_GREEN), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_HIGH, ST_GREEN)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_TINT_(ST_TONE_HIGH, ST_BLUE), D_TRANSLATE(ST_I18N_TINT_(ST_TONE_HIGH, ST_BLUE)), 0, 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Color correction group.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, ST_KEY_CORRECTION, D_TRANSLATE(ST_I18N_CORRECTION), OBS_GROUP_NORMAL, grp);

		{
			// Hue is in degrees; every other correction slider is a percentage.
			auto p = obs_properties_add_float_slider(grp, ST_KEY_CORRECTION_(ST_HUE), D_TRANSLATE(ST_I18N_CORRECTION_(ST_HUE)), -180., 180., .01);
			obs_property_float_set_suffix(p, " °");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_CORRECTION_(ST_SATURATION), D_TRANSLATE(ST_I18N_CORRECTION_(ST_SATURATION)), 0., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_CORRECTION_(ST_LIGHTNESS), D_TRANSLATE(ST_I18N_CORRECTION_(ST_LIGHTNESS)), 0., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
		{
			auto p = obs_properties_add_float_slider(grp, ST_KEY_CORRECTION_(ST_CONTRAST), D_TRANSLATE(ST_I18N_CORRECTION_(ST_CONTRAST)), 0., 1000., .01);
			obs_property_float_set_suffix(p, " %");
		}
	}

	{ // Advanced group: tint math and render mode.
		obs_properties_t* grp = obs_properties_create();
		obs_properties_add_group(pr, S_ADVANCED, D_TRANSLATE(S_ADVANCED), OBS_GROUP_NORMAL, grp);

		{
			// Luma mapping used for tint blending.
			auto p = obs_properties_add_list(grp, ST_KEY_TINT_MODE, D_TRANSLATE(ST_I18N_TINT_MODE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
			std::pair<const char*, luma_mode> els[] = {{ST_I18N_TINT_MODE_(ST_MODE_LINEAR), luma_mode::Linear}, {ST_I18N_TINT_MODE_(ST_MODE_EXP), luma_mode::Exp}, {ST_I18N_TINT_MODE_(ST_MODE_EXP2), luma_mode::Exp2}, {ST_I18N_TINT_MODE_(ST_MODE_LOG), luma_mode::Log}, {ST_I18N_TINT_MODE_(ST_MODE_LOG10), luma_mode::Log10}};
			for (auto kv : els) {
				obs_property_list_add_int(p, D_TRANSLATE(kv.first), static_cast<int64_t>(kv.second));
			}
		}
		{
			// Color space used for tone detection.
			auto p = obs_properties_add_list(grp, ST_KEY_TINT_DETECTION, D_TRANSLATE(ST_I18N_TINT_DETECTION), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
			std::pair<const char*, detection_mode> els[] = {{ST_I18N_TINT_DETECTION_(ST_DETECTION_HSV), detection_mode::HSV}, {ST_I18N_TINT_DETECTION_(ST_DETECTION_HSL), detection_mode::HSL}, {ST_I18N_TINT_DETECTION_(ST_DETECTION_YUV_SDR), detection_mode::YUV_SDR}};
			for (auto kv : els) {
				obs_property_list_add_int(p, D_TRANSLATE(kv.first), static_cast<int64_t>(kv.second));
			}
		}

		obs_properties_add_float_slider(grp, ST_KEY_TINT_EXPONENT, D_TRANSLATE(ST_I18N_TINT_EXPONENT), 0., 10., .01);

		{
			// Render mode: Automatic (-1), Direct (0), or an explicit LUT depth.
			auto p = obs_properties_add_list(grp, ST_KEY_RENDERMODE, D_TRANSLATE(ST_I18N_RENDERMODE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
			std::pair<const char*, int64_t> els[] = {
				{S_STATE_AUTOMATIC, -1}, {ST_I18N_RENDERMODE_DIRECT, 0}, {ST_I18N_RENDERMODE_LUT_2BIT, static_cast<int64_t>(streamfx::gfx::lut::color_depth::_2)}, {ST_I18N_RENDERMODE_LUT_4BIT, static_cast<int64_t>(streamfx::gfx::lut::color_depth::_4)}, {ST_I18N_RENDERMODE_LUT_6BIT, static_cast<int64_t>(streamfx::gfx::lut::color_depth::_6)}, {ST_I18N_RENDERMODE_LUT_8BIT, static_cast<int64_t>(streamfx::gfx::lut::color_depth::_8)},
				//{ST_RENDERMODE_LUT_10BIT, static_cast<int64_t>(gfx::lut::color_depth::_10)},
			};
			for (auto kv : els) {
				obs_property_list_add_int(p, D_TRANSLATE(kv.first), kv.second);
			}
		}
	}

	return pr;
}
// Property-button callback: opens the online manual in the default browser.
// Always returns false so OBS does not rebuild the property view.
bool color_grade_factory::on_manual_open(obs_properties_t* props, obs_property_t* property, void* data)
{
	try {
		streamfx::open_url(HELP_URL);
		return false;
	} catch (const std::exception& ex) {
		D_LOG_ERROR("Failed to open manual due to error: %s", ex.what());
		return false;
	} catch (...) {
		D_LOG_ERROR("Failed to open manual due to unknown error.", "");
		return false;
	}
}
// Returns the shared factory singleton, creating it on first use. Guarded by a
// mutex for thread-safe lazy creation; the weak_ptr allows the factory to be
// destroyed once the last external owner releases it. Returns nullptr (and
// logs) if construction fails.
std::shared_ptr<color_grade_factory> streamfx::filter::color_grade::color_grade_factory::instance()
{
	static std::weak_ptr<color_grade_factory> winst;
	static std::mutex                         mtx;

	std::unique_lock<decltype(mtx)> lock(mtx);

	auto instance = winst.lock();
	if (!instance) {
		try {
			instance = std::shared_ptr<color_grade_factory>(new color_grade_factory());
			winst    = instance;
		} catch (const std::exception& ex) {
			D_LOG_ERROR("Failed to initialize due to error: %s", ex.what());
		} catch (...) {
			D_LOG_ERROR("Failed to initialize due to unknown error.", "");
		}
	}
	return instance;
}
// Keeps the factory alive for the lifetime of the component.
static std::shared_ptr<color_grade_factory> loader_instance;

// Registers the component with StreamFX: the initializer runs at plugin load,
// the finalizer at plugin unload.
static auto loader = streamfx::component(
	"color_grade",
	[]() { // Initializer
		loader_instance = color_grade_factory::instance();
	},
	[]() { // Finalizer
		loader_instance.reset();
	},
	{});

View File

@ -0,0 +1,106 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "gfx/gfx-mipmapper.hpp"
#include "gfx/lut/gfx-lut-consumer.hpp"
#include "gfx/lut/gfx-lut-producer.hpp"
#include "gfx/lut/gfx-lut.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "obs/gs/gs-vertexbuffer.hpp"
#include "obs/obs-source-factory.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <vector>
#include "warning-enable.hpp"
namespace streamfx::filter::color_grade {
// Color model used to classify a pixel's tonal range for tinting. Stored as an
// integer in the source settings (see ST_KEY_TINT_DETECTION).
enum class detection_mode {
	HSV,
	HSL,
	YUV_SDR, // YUV-based detection for SDR content — exact matrix defined by the effect file.
};
// Mapping applied to the detected luma before tint blending. Stored as an
// integer in the source settings (see ST_KEY_TINT_MODE).
enum class luma_mode {
	Linear,
	Exp,
	Exp2,
	Log,
	Log10,
};
// Instance of the Color Grade filter. Captures its input into a cache texture,
// applies lift/gamma/gain/offset, per-tone tinting and color correction —
// either directly per pixel or through a pre-baked LUT — and draws the cached,
// graded result.
class color_grade_instance : public obs::source_instance {
	streamfx::obs::gs::effect            _effect;   // Color grading effect (effects/color-grade.effect).
	std::shared_ptr<streamfx::gfx::util> _gfx_util; // Shared helper for fullscreen-triangle draws.

	// User Configuration
	vec4                            _lift;           // Per-channel + overall lift (factor).
	vec4                            _gamma;          // Per-channel + overall gamma (exponent form, see fix_gamma_value).
	vec4                            _gain;           // Per-channel + overall gain (factor).
	vec4                            _offset;         // Per-channel + overall offset (factor).
	detection_mode                  _tint_detection; // Color model for tone detection.
	luma_mode                       _tint_luma;      // Luma mapping for tint blending.
	float                           _tint_exponent;
	vec3                            _tint_low;       // Shadow tint (RGB factors).
	vec3                            _tint_mid;       // Midtone tint (RGB factors).
	vec3                            _tint_hig;       // Highlight tint (RGB factors).
	vec4                            _correction;     // Hue, saturation, lightness, contrast.
	bool                            _lut_enabled;    // User selected a LUT render mode.
	streamfx::gfx::lut::color_depth _lut_depth;      // LUT depth in use.

	// Capture Cache (the filter input rendered to a texture).
	std::shared_ptr<streamfx::obs::gs::texrender> _ccache_rt;
	std::shared_ptr<streamfx::obs::gs::texture>   _ccache_texture;
	bool                                          _ccache_fresh; // Valid for the current frame?

	// LUT work flow
	bool                                          _lut_initialized; // LUT producer/consumer available.
	bool                                          _lut_dirty;       // LUT must be rebuilt before next use.
	std::shared_ptr<streamfx::gfx::lut::producer> _lut_producer;
	std::shared_ptr<streamfx::gfx::lut::consumer> _lut_consumer;
	std::shared_ptr<streamfx::obs::gs::texrender> _lut_rt;
	std::shared_ptr<streamfx::obs::gs::texture>   _lut_texture;

	// Render Cache (the graded output).
	std::shared_ptr<streamfx::obs::gs::texrender> _cache_rt;
	std::shared_ptr<streamfx::obs::gs::texture>   _cache_texture;
	bool                                          _cache_fresh; // Valid for the current frame?

	public:
	color_grade_instance(obs_data_t* data, obs_source_t* self);
	virtual ~color_grade_instance();

	// (Re-)allocates the output cache render target with the given format.
	void allocate_rendertarget(gs_color_format format);

	virtual void load(obs_data_t* data) override;
	virtual void migrate(obs_data_t* data, uint64_t version) override;
	virtual void update(obs_data_t* data) override;

	// Uploads the current configuration into the effect's parameters.
	void prepare_effect();
	// Rebuilds the grading LUT from the current configuration.
	void rebuild_lut();

	virtual void video_tick(float time) override;
	virtual void video_render(gs_effect_t* effect) override;
};
class color_grade_factory : public obs::source_factory<filter::color_grade::color_grade_factory, filter::color_grade::color_grade_instance> {
public:
color_grade_factory();
virtual ~color_grade_factory();
virtual const char* get_name() override;
virtual void get_defaults2(obs_data_t* data) override;
virtual obs_properties_t* get_properties2(color_grade_instance* data) override;
static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);
public: // Singleton
static std::shared_ptr<color_grade_factory> instance();
};
} // namespace streamfx::filter::color_grade

View File

@ -0,0 +1,62 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-lut-consumer.hpp"
#include "obs/gs/gs-helper.hpp"
// Acquire the shared LUT data singleton and verify that the consumer effect
// loaded; without it LUTs cannot be applied at all.
// Throws std::runtime_error when the effect is unavailable.
streamfx::gfx::lut::consumer::consumer()
{
	_data = streamfx::gfx::lut::data::instance();
	if (!_data->consumer_effect())
		throw std::runtime_error("Unable to get LUT consumer effect.");
}

streamfx::gfx::lut::consumer::~consumer() = default;
// Configure the LUT consumer effect for the given color depth and LUT
// texture, and return it ready for rendering.
// - lut_params_0: integer geometry (size, grid size, container size).
// - lut_params_1: reciprocal geometry plus a half-texel offset for sampling.
// Returns the shared effect; parameters missing from the effect are skipped.
std::shared_ptr<streamfx::obs::gs::effect> streamfx::gfx::lut::consumer::prepare(streamfx::gfx::lut::color_depth depth, std::shared_ptr<streamfx::obs::gs::texture> lut)
{
	auto gctx   = streamfx::obs::gs::context();
	auto effect = _data->consumer_effect();

	// All three values are exact powers of two, so integer shifts replace the
	// previous pow()-then-cast round-trip (identical results for all valid
	// depths, without going through floating point).
	int32_t idepth         = static_cast<int32_t>(depth);
	int32_t size           = INT32_C(1) << idepth;
	int32_t grid_size      = INT32_C(1) << (idepth / 2);
	int32_t container_size = INT32_C(1) << (idepth + (idepth / 2));

	if (streamfx::obs::gs::effect_parameter efp = effect->get_parameter("lut_params_0"); efp) {
		efp.set_int4(size, grid_size, container_size, 0l);
	}

	if (streamfx::obs::gs::effect_parameter efp = effect->get_parameter("lut_params_1"); efp) {
		float inverse_size           = 1.f / static_cast<float>(size);
		float inverse_z_size         = 1.f / static_cast<float>(grid_size);
		float inverse_container_size = 1.f / static_cast<float>(container_size);
		float half_texel             = inverse_container_size / 2.f;
		efp.set_float4(inverse_size, inverse_z_size, inverse_container_size, half_texel);
	}

	if (streamfx::obs::gs::effect_parameter efp = effect->get_parameter("lut"); efp) {
		efp.set_texture(lut);
	}

	return effect;
}
// Apply the LUT to 'texture' by drawing a fullscreen quad with the prepared
// consumer effect. Caller is responsible for the active render target.
void streamfx::gfx::lut::consumer::consume(streamfx::gfx::lut::color_depth depth, std::shared_ptr<streamfx::obs::gs::texture> lut, std::shared_ptr<streamfx::obs::gs::texture> texture)
{
	auto gctx   = streamfx::obs::gs::context();
	auto effect = prepare(depth, lut);

	if (streamfx::obs::gs::effect_parameter efp = effect->get_parameter("image"); efp) {
		efp.set_texture(texture->get_object());
	}

	// Draw a simple quad.
	while (gs_effect_loop(effect->get_object(), "Draw")) {
		gs_draw_sprite(nullptr, 0, 1, 1);
	}
}

View File

@ -0,0 +1,26 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "gfx-lut.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::gfx::lut {
	// Applies a previously produced LUT texture to an image via the shared
	// LUT consumer effect.
	class consumer {
		std::shared_ptr<streamfx::gfx::lut::data> _data; // Shared effect holder.

		public:
		consumer();
		~consumer();

		// Set up the consumer effect for 'depth'/'lut' and return it for
		// manual rendering.
		std::shared_ptr<streamfx::obs::gs::effect> prepare(streamfx::gfx::lut::color_depth depth, std::shared_ptr<streamfx::obs::gs::texture> lut);

		// Convenience: prepare and immediately draw 'texture' with the LUT
		// applied to the currently bound render target.
		void consume(streamfx::gfx::lut::color_depth depth, std::shared_ptr<streamfx::obs::gs::texture> lut, std::shared_ptr<streamfx::obs::gs::texture> texture);
	};
} // namespace streamfx::gfx::lut

View File

@ -0,0 +1,76 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-lut-producer.hpp"
#include "obs/gs/gs-helper.hpp"
// Map a LUT color depth to the smallest texture format that stores it
// without precision loss; unknown/invalid depths get full float precision.
gs_color_format format_from_depth(streamfx::gfx::lut::color_depth depth)
{
	using cd = streamfx::gfx::lut::color_depth;
	if ((depth == cd::_2) || (depth == cd::_4) || (depth == cd::_6) || (depth == cd::_8)) {
		return gs_color_format::GS_RGBA; // 8 bits per channel is enough.
	} else if (depth == cd::_10) {
		return gs_color_format::GS_R10G10B10A2;
	} else if ((depth == cd::_12) || (depth == cd::_14) || (depth == cd::_16)) {
		return gs_color_format::GS_RGBA16;
	}
	return gs_color_format::GS_RGBA32F; // Fallback, including Invalid.
}
// Acquire the shared LUT data singleton and the fullscreen-draw helper, and
// verify the producer effect loaded.
// Throws std::runtime_error when the effect is unavailable.
streamfx::gfx::lut::producer::producer() : _gfx_util(::streamfx::gfx::util::get())
{
	_data = streamfx::gfx::lut::data::instance();
	if (!_data->producer_effect())
		throw std::runtime_error("Unable to get LUT producer effect.");
}

streamfx::gfx::lut::producer::~producer() = default;
// Render an identity LUT texture for the given color depth and return it.
// The internal render target is reused and only reallocated when the format
// implied by 'depth' changes.
std::shared_ptr<streamfx::obs::gs::texture> streamfx::gfx::lut::producer::produce(streamfx::gfx::lut::color_depth depth)
{
	auto gctx = streamfx::obs::gs::context();

	// (Re)allocate the render target when the required format changed.
	if (!_rt || (_rt->color_format() != format_from_depth((depth)))) {
		_rt = std::make_shared<streamfx::obs::gs::texrender>(format_from_depth(depth), GS_ZS_NONE);
	}

	auto effect = _data->producer_effect();

	// LUT geometry; all values are exact powers of two, so integer shifts
	// replace the previous pow()-then-cast round-trip (identical results).
	int32_t idepth         = static_cast<int32_t>(depth);
	int32_t size           = INT32_C(1) << idepth;
	int32_t grid_size      = INT32_C(1) << (idepth / 2);
	int32_t container_size = INT32_C(1) << (idepth + (idepth / 2));

	{
		auto op = _rt->render(static_cast<uint32_t>(container_size), static_cast<uint32_t>(container_size));

		// Write color only (no alpha), no blending/stencil, unit ortho space.
		gs_blend_state_push();
		gs_enable_color(true, true, true, false);
		gs_enable_blending(false);
		gs_enable_stencil_test(false);
		gs_enable_stencil_write(false);
		gs_ortho(0, 1, 0, 1, 0, 1);

		if (streamfx::obs::gs::effect_parameter efp = effect->get_parameter("lut_params_0"); efp) {
			efp.set_int4(size, grid_size, container_size, 0l);
		}

		while (gs_effect_loop(effect->get_object(), "Draw")) {
			_gfx_util->draw_fullscreen_triangle();
		}

		// Restore GPU state.
		gs_enable_color(true, true, true, true);
		gs_blend_state_pop();
	}

	return _rt->get_texture();
}

View File

@ -0,0 +1,26 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "warning-disable.hpp"
#include <memory>
#include "gfx-lut.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "warning-enable.hpp"
namespace streamfx::gfx::lut {
	// Renders identity LUT textures of a requested color depth.
	class producer {
		std::shared_ptr<streamfx::gfx::lut::data> _data;    // Shared effect holder.
		std::shared_ptr<streamfx::obs::gs::texrender> _rt;  // Reused render target.
		std::shared_ptr<streamfx::gfx::util> _gfx_util;     // Fullscreen-draw helper.

		public:
		producer();
		~producer();

		// Render and return the identity LUT for 'depth'.
		std::shared_ptr<streamfx::obs::gs::texture> produce(streamfx::gfx::lut::color_depth depth);
	};
} // namespace streamfx::gfx::lut

View File

@ -0,0 +1,73 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "gfx-lut.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "util/util-logging.hpp"
#include "warning-disable.hpp"
#include <mutex>
#include "warning-enable.hpp"
// Logging helpers: debug builds prefix every line with the function
// signature, release builds with a fixed module tag.
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
// This file is the shared LUT helper, so tag it "<gfx::lut>"; the previous
// "<transition::shader>" prefix was copy-pasted from another module and
// mislabeled these log lines.
#define ST_PREFIX "<gfx::lut> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
using namespace streamfx;
std::shared_ptr<streamfx::gfx::lut::data> streamfx::gfx::lut::data::instance()
{
static std::weak_ptr<streamfx::gfx::lut::data> _instance;
static std::mutex _mutex;
std::lock_guard<std::mutex> lock(_mutex);
auto reference = _instance.lock();
if (!reference) {
reference = std::shared_ptr<streamfx::gfx::lut::data>(new streamfx::gfx::lut::data());
_instance = reference;
}
return reference;
}
// Load the LUT producer and consumer effects from disk. A missing file is
// skipped silently; a failing compile is logged, leaving the corresponding
// effect pointer empty (callers must check before use).
streamfx::gfx::lut::data::data() : _producer_effect(), _consumer_effect()
{
	auto gctx = streamfx::obs::gs::context();

	std::filesystem::path lut_producer_path = streamfx::data_file_path("effects/lut-producer.effect");
	if (std::filesystem::exists(lut_producer_path)) {
		try {
			_producer_effect = std::make_shared<streamfx::obs::gs::effect>(lut_producer_path);
		} catch (std::exception const& ex) {
			D_LOG_ERROR("Loading LUT Producer effect failed: %s", ex.what());
		}
	}

	std::filesystem::path lut_consumer_path = streamfx::data_file_path("effects/lut-consumer.effect");
	if (std::filesystem::exists(lut_consumer_path)) {
		try {
			_consumer_effect = std::make_shared<streamfx::obs::gs::effect>(lut_consumer_path);
		} catch (std::exception const& ex) {
			D_LOG_ERROR("Loading LUT Consumer effect failed: %s", ex.what());
		}
	}
}
// Release both effects while holding a graphics context, so GPU resources
// are freed on the correct thread.
streamfx::gfx::lut::data::~data()
{
	auto gctx = streamfx::obs::gs::context();
	_producer_effect.reset();
	_consumer_effect.reset();
}

View File

@ -0,0 +1,48 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "obs/gs/gs-effect.hpp"
#include "warning-disable.hpp"
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::gfx::lut {
	// Shared, lazily-created holder for the LUT producer/consumer effects.
	class data {
		std::shared_ptr<streamfx::obs::gs::effect> _producer_effect;
		std::shared_ptr<streamfx::obs::gs::effect> _consumer_effect;

		public:
		// Shared singleton accessor; the instance lives while referenced.
		static std::shared_ptr<data> instance();

		private:
		data();

		public:
		~data();

		// Effect that renders identity LUTs; may be empty if loading failed.
		inline std::shared_ptr<streamfx::obs::gs::effect> producer_effect()
		{
			return _producer_effect;
		};

		// Effect that applies LUTs; may be empty if loading failed.
		inline std::shared_ptr<streamfx::obs::gs::effect> consumer_effect()
		{
			return _consumer_effect;
		};
	};

	// Bits per channel of a LUT; the numeric value equals the bit count.
	enum class color_depth {
		Invalid = 0,
		_2      = 2,
		_4      = 4,
		_6      = 6,
		_8      = 8,
		_10     = 10,
		_12     = 12,
		_14     = 14,
		_16     = 16,
	};
} // namespace streamfx::gfx::lut

View File

@ -0,0 +1,23 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
# Component build script for the Denoising filter.
cmake_minimum_required(VERSION 3.26)
project("Denoising")
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

# Register the component; the resolver runs once dependency state is known.
streamfx_add_component("Denoising"
	RESOLVER streamfx_denoising_resolver
)
streamfx_add_component_dependency("NVIDIA" OPTIONAL)

function(streamfx_denoising_resolver)
	# Providers
	#- NVIDIA: define ENABLE_NVIDIA only when the NVIDIA component is enabled.
	streamfx_enabled_component("NVIDIA" T_CHECK)
	if(T_CHECK)
		target_compile_definitions(${COMPONENT_TARGET} PRIVATE
			ENABLE_NVIDIA
		)
	endif()
endfunction()

View File

@ -0,0 +1,634 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "filter-denoising.hpp"
#include "obs/gs/gs-helper.hpp"
#include "plugin.hpp"
#include "util/util-logging.hpp"
#include "warning-disable.hpp"
#include <algorithm>
#include "warning-enable.hpp"
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
#define ST_PREFIX "<filter::video_denoising> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
#define ST_I18N "Filter.Denoising"
#define ST_KEY_PROVIDER "Provider"
#define ST_I18N_PROVIDER ST_I18N "." ST_KEY_PROVIDER
#define ST_I18N_PROVIDER_NVIDIA_DENOISING ST_I18N_PROVIDER ".NVIDIA.Denoising"
#ifdef ENABLE_NVIDIA
#define ST_KEY_NVIDIA_DENOISING "NVIDIA.Denoising"
#define ST_I18N_NVIDIA_DENOISING ST_I18N "." ST_KEY_NVIDIA_DENOISING
#define ST_KEY_NVIDIA_DENOISING_STRENGTH "NVIDIA.Denoising.Strength"
#define ST_I18N_NVIDIA_DENOISING_STRENGTH ST_I18N "." ST_KEY_NVIDIA_DENOISING_STRENGTH
#define ST_I18N_NVIDIA_DENOISING_STRENGTH_WEAK ST_I18N_NVIDIA_DENOISING_STRENGTH ".Weak"
#define ST_I18N_NVIDIA_DENOISING_STRENGTH_STRONG ST_I18N_NVIDIA_DENOISING_STRENGTH ".Strong"
#endif
using streamfx::filter::denoising::denoising_factory;
using streamfx::filter::denoising::denoising_instance;
using streamfx::filter::denoising::denoising_provider;
// Wiki page opened by the "Open Manual" button in the properties UI.
static constexpr std::string_view HELP_URL = "https://github.com/Xaymar/obs-StreamFX/wiki/Filter-Denoising";

// Providers in descending order of preference for AUTOMATIC selection.
static denoising_provider provider_priority[] = {
	denoising_provider::NVIDIA_DENOISING,
};
// Translate a denoising_provider value into a localized, human-readable
// name. Throws std::runtime_error for values missing from the switch.
const char* streamfx::filter::denoising::cstring(denoising_provider provider)
{
	switch (provider) {
	case denoising_provider::INVALID:
		return "N/A";
	case denoising_provider::AUTOMATIC:
		return D_TRANSLATE(S_STATE_AUTOMATIC);
	case denoising_provider::NVIDIA_DENOISING:
		return D_TRANSLATE(ST_I18N_PROVIDER_NVIDIA_DENOISING);
	default:
		throw std::runtime_error("Missing Conversion Entry");
	}
}
// Owning-string variant of cstring(); same translation, same exceptions.
std::string streamfx::filter::denoising::string(denoising_provider provider)
{
	return std::string{cstring(provider)};
}
//------------------------------------------------------------------------------
// Instance
//------------------------------------------------------------------------------
// Construct a denoising filter instance: preallocate the input render
// target on the GPU, load the standard draw effect, then apply initial
// settings (if any) via load().
denoising_instance::denoising_instance(obs_data_t* data, obs_source_t* self)
	: obs::source_instance(data, self),
	  _size(1, 1), _provider(denoising_provider::INVALID), _provider_ui(denoising_provider::INVALID), _provider_ready(false), _provider_lock(), _provider_task(), _input(), _output()
{
	D_LOG_DEBUG("Initializating... (Addr: 0x%" PRIuPTR ")", this);

	{
		::streamfx::obs::gs::context gctx;

		// Create the render target for the input buffering.
		_input = std::make_shared<::streamfx::obs::gs::texrender>(GS_RGBA_UNORM, GS_ZS_NONE);
		_input->render(1, 1); // Preallocate the RT on the driver and GPU.
		_output = _input->get_texture();

		// Load the required effect.
		_standard_effect = std::make_shared<::streamfx::obs::gs::effect>(::streamfx::data_file_path("effects/standard.effect"));
	}

	if (data) {
		load(data);
	}
}
// Tear down the instance: cancel any pending provider-switch task, then
// unload the active provider while holding the provider lock.
denoising_instance::~denoising_instance()
{
	D_LOG_DEBUG("Finalizing... (Addr: 0x%" PRIuPTR ")", this);

	{ // Unload the underlying effect ASAP.
		std::unique_lock<std::mutex> ul(_provider_lock);

		// De-queue the underlying task.
		if (_provider_task) {
			streamfx::util::threadpool::threadpool::instance()->pop(_provider_task);
			_provider_task->await_completion();
			_provider_task.reset();
		}

		// TODO: Make this asynchronous.
		switch (_provider) {
#ifdef ENABLE_NVIDIA
		case denoising_provider::NVIDIA_DENOISING:
			nvvfx_denoising_unload();
			break;
#endif
		default:
			break;
		}
	}
}
// Initial settings load simply reuses the update path.
void denoising_instance::load(obs_data_t* data)
{
	update(data);
}
// No settings migrations exist for this filter yet; intentionally empty.
void denoising_instance::migrate(obs_data_t* data, uint64_t version) {}
// Apply settings: resolve AUTOMATIC to a concrete provider, switch provider
// if the selection changed, then forward the settings to the active
// provider (only when it finished loading).
void denoising_instance::update(obs_data_t* data)
{
	// Check if the user changed which Denoising provider we use.
	denoising_provider provider = static_cast<denoising_provider>(obs_data_get_int(data, ST_KEY_PROVIDER));
	if (provider == denoising_provider::AUTOMATIC) {
		provider = denoising_factory::instance()->find_ideal_provider();
	}

	// Check if the provider was changed, and if so switch.
	if (provider != _provider) {
		_provider_ui = provider;
		switch_provider(provider);
	}

	if (_provider_ready) {
		std::unique_lock<std::mutex> ul(_provider_lock);

		switch (_provider) {
#ifdef ENABLE_NVIDIA
		case denoising_provider::NVIDIA_DENOISING:
			nvvfx_denoising_update(data);
			break;
#endif
		default:
			break;
		}
	}
}
// Append the provider-specific UI for the provider currently shown in the
// UI (_provider_ui, which may lead the actual switch completing).
void streamfx::filter::denoising::denoising_instance::properties(obs_properties_t* properties)
{
	switch (_provider_ui) {
#ifdef ENABLE_NVIDIA
	case denoising_provider::NVIDIA_DENOISING:
		nvvfx_denoising_properties(properties);
		break;
#endif
	default:
		break;
	}
}
// Report the filter width, clamped to at least 1 so OBS never sees a
// zero-sized source.
uint32_t streamfx::filter::denoising::denoising_instance::get_width()
{
	return (_size.first > 1u) ? _size.first : 1u;
}
// Report the filter height, clamped to at least 1 so OBS never sees a
// zero-sized source.
uint32_t streamfx::filter::denoising::denoising_instance::get_height()
{
	return (_size.second > 1u) ? _size.second : 1u;
}
// Per-frame tick: pick up the upstream size, let the active provider adjust
// it, and mark the cached output as stale for the next render.
void denoising_instance::video_tick(float time)
{
	// (Removed an unused 'parent' local; obs_filter_get_parent is a plain
	// getter and its result was never read here.)
	auto target = obs_filter_get_target(_self);
	auto width  = obs_source_get_base_width(target);
	auto height = obs_source_get_base_height(target);

	// Verify that the detected size makes sense.
	if ((width > 0) && (height > 0)) {
		_size = {width, height};
	}

	// Allow the provider to restrict the size.
	if (target && _provider_ready) {
		std::unique_lock<std::mutex> ul(_provider_lock);

		switch (_provider) {
#ifdef ENABLE_NVIDIA
		case denoising_provider::NVIDIA_DENOISING:
			nvvfx_denoising_size();
			break;
#endif
		default:
			break;
		}
	}

	_dirty = true;
}
// Render the filter: capture the upstream image into _input, run the active
// provider over it (only when _dirty), then draw _output for the next
// filter in the chain. Skips the filter entirely whenever any prerequisite
// is missing.
void denoising_instance::video_render(gs_effect_t* effect)
{
	auto parent = obs_filter_get_parent(_self);
	auto target = obs_filter_get_target(_self);
	auto width  = obs_source_get_base_width(target);
	auto height = obs_source_get_base_height(target);
	vec4 blank  = vec4{0, 0, 0, 0};

	// Ensure we have the bare minimum of valid information.
	target = target ? target : parent;
	// NOTE(review): 'effect' is normalized here but not referenced again
	// below; rendering uses OBS_EFFECT_DEFAULT and _standard_effect instead.
	effect = effect ? effect : obs_get_base_effect(OBS_EFFECT_DEFAULT);

	// Skip the filter if:
	// - The Provider isn't ready yet.
	// - We don't have a target.
	// - The width/height of the next filter in the chain is empty.
	if (!_provider_ready || !target || (width == 0) || (height == 0)) {
		obs_source_skip_video_filter(_self);
		return;
	}

#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	::streamfx::obs::gs::debug_marker profiler0{::streamfx::obs::gs::debug_color_source, "StreamFX Denoising"};
	::streamfx::obs::gs::debug_marker profiler0_0{::streamfx::obs::gs::debug_color_gray, "'%s' on '%s'", obs_source_get_name(_self), obs_source_get_name(parent)};
#endif

	if (_dirty) { // Lock the provider from being changed.
		std::unique_lock<std::mutex> ul(_provider_lock);

		{ // Allow the provider to restrict the size.
			switch (_provider) {
#ifdef ENABLE_NVIDIA
			case denoising_provider::NVIDIA_DENOISING:
				nvvfx_denoising_size();
				break;
#endif
			default:
				_size = {width, height};
				break;
			}
		}

		{ // Capture the incoming frame.
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			::streamfx::obs::gs::debug_marker profiler1{::streamfx::obs::gs::debug_color_capture, "Capture"};
#endif
			if (obs_source_process_filter_begin(_self, GS_RGBA, OBS_ALLOW_DIRECT_RENDERING)) {
				auto op = _input->render(_size.first, _size.second);

				// Matrix
				gs_matrix_push();
				gs_ortho(0., 1., 0., 1., 0., 1.);

				// Clear the buffer
				gs_clear(GS_CLEAR_COLOR | GS_CLEAR_DEPTH, &blank, 0, 0);

				// Set GPU state
				gs_blend_state_push();
				gs_enable_color(true, true, true, true);
				gs_enable_blending(false);
				gs_enable_depth_test(false);
				gs_enable_stencil_test(false);
				gs_set_cull_mode(GS_NEITHER);

				// Render
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
				::streamfx::obs::gs::debug_marker profiler2{::streamfx::obs::gs::debug_color_capture, "Storage"};
#endif
				obs_source_process_filter_end(_self, obs_get_base_effect(OBS_EFFECT_DEFAULT), 1, 1);

				// Reset GPU state
				gs_blend_state_pop();
				gs_matrix_pop();
			} else {
				obs_source_skip_video_filter(_self);
				return;
			}
		}

		try { // Process the captured input with the provider.
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			::streamfx::obs::gs::debug_marker profiler1{::streamfx::obs::gs::debug_color_convert, "Process"};
#endif
			switch (_provider) {
#ifdef ENABLE_NVIDIA
			case denoising_provider::NVIDIA_DENOISING:
				nvvfx_denoising_process();
				break;
#endif
			default:
				_output.reset();
				break;
			}
		} catch (...) {
			// Provider failed this frame; pass the image through untouched.
			obs_source_skip_video_filter(_self);
			return;
		}

		if (!_output) {
			D_LOG_ERROR("Provider '%s' did not return a result.", cstring(_provider));
			obs_source_skip_video_filter(_self);
			return;
		}

		// Mark "clean".
		_dirty = false;

		// Unlock the provider, as we are no longer doing critical work with it.
	}

	{ // Draw the result for the next filter to use.
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		::streamfx::obs::gs::debug_marker profiler1{::streamfx::obs::gs::debug_color_render, "Render"};
#endif
		// InputA carries the processed image, InputB the raw capture; the
		// "RestoreAlpha" technique recombines them.
		if (_standard_effect->has_parameter("InputA", ::streamfx::obs::gs::effect_parameter::type::Texture)) {
			_standard_effect->get_parameter("InputA").set_texture(_output);
		}
		if (_standard_effect->has_parameter("InputB", ::streamfx::obs::gs::effect_parameter::type::Texture)) {
			_standard_effect->get_parameter("InputB").set_texture(_input->get_texture());
		}
		while (gs_effect_loop(_standard_effect->get_object(), "RestoreAlpha")) {
			gs_draw_sprite(nullptr, 0, _size.first, _size.second);
		}
	}
}
// Payload for the asynchronous provider switch: records the provider being
// switched away from, so the task knows what to unload.
struct switch_provider_data_t {
	denoising_provider provider;
};
// Queue an asynchronous switch to 'provider': cancels any in-flight switch
// task, records the old provider, and schedules task_switch_provider on the
// shared threadpool. Caller threads must not hold _provider_lock.
void streamfx::filter::denoising::denoising_instance::switch_provider(denoising_provider provider)
{
	std::unique_lock<std::mutex> ul(_provider_lock);

	// Safeguard against calls made from unlocked memory.
	if (provider == _provider) {
		return;
	}

	// This doesn't work correctly.
	// - Need to allow multiple switches at once because OBS is weird.
	// - Doesn't guarantee that the task is properly killed off.

	// Log information.
	D_LOG_INFO("Instance '%s' is switching provider from '%s' to '%s'.", obs_source_get_name(_self), cstring(_provider), cstring(provider));

	// If there is an existing task, attempt to cancel it.
	if (_provider_task) {
		// De-queue it.
		streamfx::util::threadpool::threadpool::instance()->pop(_provider_task);

		// Await the death of the task itself.
		_provider_task->await_completion();

		// Clear any memory associated with it.
		_provider_task.reset();
	}

	// Build data to pass into the task.
	auto spd      = std::make_shared<switch_provider_data_t>();
	spd->provider = _provider;
	_provider     = provider;

	// Then spawn a new task to switch provider.
	_provider_task = streamfx::util::threadpool::threadpool::instance()->push(std::bind(&denoising_instance::task_switch_provider, this, std::placeholders::_1), spd);
}
// Threadpool task body: unload the previous provider, load the new one, and
// flip _provider_ready back on once the switch succeeded. Runs with
// _provider_lock held for the whole duration.
void streamfx::filter::denoising::denoising_instance::task_switch_provider(util::threadpool::task_data_t data)
{
	std::shared_ptr<switch_provider_data_t> spd = std::static_pointer_cast<switch_provider_data_t>(data);

	// 1. Mark the provider as no longer ready.
	_provider_ready = false;

	// 2. Lock the provider from being used.
	std::unique_lock<std::mutex> ul(_provider_lock);

	try {
		// 3. Unload the previous provider.
		switch (spd->provider) {
#ifdef ENABLE_NVIDIA
		case denoising_provider::NVIDIA_DENOISING:
			nvvfx_denoising_unload();
			break;
#endif
		default:
			break;
		}

		// 4. Load the new provider.
		switch (_provider) {
#ifdef ENABLE_NVIDIA
		case denoising_provider::NVIDIA_DENOISING:
			nvvfx_denoising_load();
			break;
#endif
		default:
			break;
		}

		// Log information.
		D_LOG_INFO("Instance '%s' switched provider from '%s' to '%s'.", obs_source_get_name(_self), cstring(spd->provider), cstring(_provider));

		_provider_ready = true;
	} catch (std::exception const& ex) {
		// Log information. (_provider_ready stays false, filter is skipped.)
		D_LOG_ERROR("Instance '%s' failed switching provider with error: %s", obs_source_get_name(_self), ex.what());
	}
}
#ifdef ENABLE_NVIDIA
// Create the NVIDIA Video Effects denoiser; called from the switch task
// while _provider_lock is held.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_load()
{
	_nvidia_fx = std::make_shared<::streamfx::nvidia::vfx::denoising>();
}

// Release the denoiser instance.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_unload()
{
	_nvidia_fx.reset();
}

// Let the effect adjust/restrict the desired size stored in _size.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_size()
{
	if (!_nvidia_fx) {
		return;
	}

	_nvidia_fx->size(_size);
}

// Run the denoiser over the captured input, storing the result in _output;
// passes the input through unchanged when the effect is not loaded.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_process()
{
	if (!_nvidia_fx) {
		_output = _input->get_texture();
		return;
	}

	_output = _nvidia_fx->process(_input->get_texture());
}

// Build the provider-specific UI: a single Weak/Strong strength combo.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_properties(obs_properties_t* props)
{
	obs_properties_t* grp = obs_properties_create();
	obs_properties_add_group(props, ST_KEY_NVIDIA_DENOISING, D_TRANSLATE(ST_I18N_NVIDIA_DENOISING), OBS_GROUP_NORMAL, grp);

	{
		auto p = obs_properties_add_list(grp, ST_KEY_NVIDIA_DENOISING_STRENGTH, D_TRANSLATE(ST_I18N_NVIDIA_DENOISING_STRENGTH), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
		obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_NVIDIA_DENOISING_STRENGTH_WEAK), 0);
		obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_NVIDIA_DENOISING_STRENGTH_STRONG), 1);
	}
}

// Apply settings: maps the Weak/Strong list entry (0/1) to strength 0.0/1.0.
void streamfx::filter::denoising::denoising_instance::nvvfx_denoising_update(obs_data_t* data)
{
	if (!_nvidia_fx)
		return;

	_nvidia_fx->set_strength(static_cast<float>(obs_data_get_int(data, ST_KEY_NVIDIA_DENOISING_STRENGTH) == 0 ? 0. : 1.));
}
#endif
//------------------------------------------------------------------------------
// Factory
//------------------------------------------------------------------------------
// SDK handles (_nvcuda/_nvcvi/_nvvfx) release themselves via shared_ptr.
denoising_factory::~denoising_factory() {}
// Probe all supported denoising providers; if at least one loaded, register
// the filter source with OBS (plus a legacy id proxy). If none loaded the
// factory stays unregistered and the effect is unavailable.
denoising_factory::denoising_factory()
{
	bool any_available = false;

	// 1. Try and load any configured providers.
#ifdef ENABLE_NVIDIA
	try {
		// Load CVImage and Video Effects SDK.
		_nvcuda           = ::streamfx::nvidia::cuda::obs::get();
		_nvcvi            = ::streamfx::nvidia::cv::cv::get();
		_nvvfx            = ::streamfx::nvidia::vfx::vfx::get();
		_nvidia_available = true;
		any_available |= _nvidia_available;
	} catch (const std::exception& ex) {
		_nvidia_available = false;
		_nvvfx.reset();
		_nvcvi.reset();
		_nvcuda.reset();
		D_LOG_WARNING("Failed to make NVIDIA providers available due to error: %s", ex.what());
	} catch (...) {
		_nvidia_available = false;
		_nvvfx.reset();
		_nvcvi.reset();
		_nvcuda.reset();
		D_LOG_WARNING("Failed to make NVIDIA providers available with unknown error.", nullptr);
	}
#endif

	// 2. Check if any of them managed to load at all.
	if (!any_available) {
		D_LOG_ERROR("All supported providers failed to initialize, disabling effect.", 0);
		return;
	}

	// 3. In any other case, register the filter!
	_info.id           = S_PREFIX "filter-denoising";
	_info.type         = OBS_SOURCE_TYPE_FILTER;
	_info.output_flags = OBS_SOURCE_VIDEO | OBS_SOURCE_CUSTOM_DRAW;

	support_size(true);
	finish_setup();

	// Proxies
	register_proxy("streamfx-filter-video-denoising");
}
// Localized display name shown in the OBS filter list.
const char* denoising_factory::get_name()
{
	return D_TRANSLATE(ST_I18N);
}
// Install default settings: AUTOMATIC provider selection and, when NVIDIA
// support is compiled in, full ("Strong") denoising strength.
void denoising_factory::get_defaults2(obs_data_t* data)
{
	obs_data_set_default_int(data, ST_KEY_PROVIDER, static_cast<int64_t>(denoising_provider::AUTOMATIC));

#ifdef ENABLE_NVIDIA
	// The strength setting is an integer combo box (see
	// nvvfx_denoising_properties) and is read back with obs_data_get_int(),
	// so the default must be an int as well — a double default does not
	// match the OBS_COMBO_FORMAT_INT list entries.
	obs_data_set_default_int(data, ST_KEY_NVIDIA_DENOISING_STRENGTH, 1);
#endif
}
// Modified-callback for the provider list. Currently only requests a UI
// refresh (returns true); the try/catch keeps exceptions from crossing the
// C callback boundary into libobs.
static bool modified_provider(obs_properties_t* props, obs_property_t*, obs_data_t* settings) noexcept
{
	try {
		return true;
	} catch (const std::exception& ex) {
		DLOG_ERROR("Unexpected exception in function '%s': %s.", __FUNCTION_NAME__, ex.what());
		return false;
	} catch (...) {
		DLOG_ERROR("Unexpected exception in function '%s'.", __FUNCTION_NAME__);
		return false;
	}
}
// Build the properties UI: manual button, provider-specific controls (from
// the live instance, if any), and the advanced provider selector.
// NOTE(review): the NVIDIA entry is offered even when ENABLE_NVIDIA is not
// compiled in; selecting it then falls back via the default switch cases.
obs_properties_t* denoising_factory::get_properties2(denoising_instance* data)
{
	obs_properties_t* pr = obs_properties_create();

	{
		obs_properties_add_button2(pr, S_MANUAL_OPEN, D_TRANSLATE(S_MANUAL_OPEN), denoising_factory::on_manual_open, nullptr);
	}

	if (data) {
		data->properties(pr);
	}

	{ // Advanced Settings
		auto grp = obs_properties_create();
		obs_properties_add_group(pr, S_ADVANCED, D_TRANSLATE(S_ADVANCED), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_list(grp, ST_KEY_PROVIDER, D_TRANSLATE(ST_I18N_PROVIDER), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
			obs_property_set_modified_callback(p, modified_provider);
			obs_property_list_add_int(p, D_TRANSLATE(S_STATE_AUTOMATIC), static_cast<int64_t>(denoising_provider::AUTOMATIC));
			obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_PROVIDER_NVIDIA_DENOISING), static_cast<int64_t>(denoising_provider::NVIDIA_DENOISING));
		}
	}

	return pr;
}
// Button callback: open the wiki page. Returns false so the properties UI
// is not rebuilt.
bool denoising_factory::on_manual_open(obs_properties_t* props, obs_property_t* property, void* data)
{
	streamfx::open_url(HELP_URL);
	return false;
}
// True only for providers whose SDKs loaded successfully in the factory
// constructor; unknown (or compiled-out) providers are never available.
bool streamfx::filter::denoising::denoising_factory::is_provider_available(denoising_provider provider)
{
	switch (provider) {
#ifdef ENABLE_NVIDIA
	case denoising_provider::NVIDIA_DENOISING:
		return _nvidia_available;
#endif
	default:
		return false;
	}
}
// Walk the static priority list and return the first available provider.
// Falls back to AUTOMATIC (treated as "none resolved") when nothing loaded.
denoising_provider streamfx::filter::denoising::denoising_factory::find_ideal_provider()
{
	for (auto v : provider_priority) {
		if (is_provider_available(v)) {
			return v; // (removed an unreachable 'break' that followed this return)
		}
	}
	return denoising_provider::AUTOMATIC;
}
std::shared_ptr<denoising_factory> denoising_factory::instance()
{
static std::weak_ptr<denoising_factory> winst;
static std::mutex mtx;
std::unique_lock<decltype(mtx)> lock(mtx);
auto instance = winst.lock();
if (!instance) {
instance = std::shared_ptr<denoising_factory>(new denoising_factory());
winst = instance;
}
return instance;
}
// Keeps the factory alive for the lifetime of the component.
static std::shared_ptr<denoising_factory> loader_instance;

// Register the "denoising" component with the StreamFX loader; depends on
// the threadpool and several graphics subsystem components.
static auto loader = streamfx::component(
	"denoising",
	[]() { // Initializer
		loader_instance = denoising_factory::instance();
	},
	[]() { // Finalizer
		loader_instance.reset();
	},
	{"core::threadpool", "core::gs::sampler", "core::gs::texrender", "core::gs::texture"});

View File

@ -0,0 +1,110 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "obs/gs/gs-effect.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-texture.hpp"
#include "obs/obs-source-factory.hpp"
#include "plugin.hpp"
#include "util/util-threadpool.hpp"
#include "warning-disable.hpp"
#include <atomic>
#include <memory>
#include <mutex>
#include "warning-enable.hpp"
#ifdef ENABLE_NVIDIA
#include "nvidia/vfx/nvidia-vfx-denoising.hpp"
#endif
namespace streamfx::filter::denoising {
	// Backend used to perform the actual denoising.
	// NOTE(review): plain enum (not enum class); values leak into the
	// enclosing namespace and are stored as int64 in settings.
	enum denoising_provider {
		INVALID          = -1,
		AUTOMATIC        = 0,
		NVIDIA_DENOISING = 1,
	};

	// Localized display name for a provider (throws on unknown values).
	const char* cstring(denoising_provider provider);

	// Owning-string variant of cstring().
	std::string string(denoising_provider provider);

	// Video filter instance that captures the upstream image, denoises it
	// through the selected provider, and re-emits the result.
	class denoising_instance : public obs::source_instance {
		std::pair<uint32_t, uint32_t> _size; // Current filter size (w, h).
		denoising_provider _provider;        // Active provider.
		denoising_provider _provider_ui;     // Provider shown in the UI.
		std::atomic<bool> _provider_ready;   // False while switching/unloaded.
		std::mutex _provider_lock;           // Guards provider state.
		std::shared_ptr<util::threadpool::task> _provider_task; // Pending switch.

		std::shared_ptr<::streamfx::obs::gs::effect> _standard_effect;
		std::shared_ptr<::streamfx::obs::gs::texrender> _input;
		std::shared_ptr<::streamfx::obs::gs::texture> _output;
		bool _dirty; // Output must be recomputed on next render.

#ifdef ENABLE_NVIDIA
		std::shared_ptr<::streamfx::nvidia::vfx::denoising> _nvidia_fx;
#endif

		public:
		denoising_instance(obs_data_t* data, obs_source_t* self);
		~denoising_instance() override;

		void load(obs_data_t* data) override;
		void migrate(obs_data_t* data, uint64_t version) override;
		void update(obs_data_t* data) override;

		// Appends provider-specific UI to 'properties'.
		void properties(obs_properties_t* properties);

		uint32_t get_width() override;
		uint32_t get_height() override;

		void video_tick(float time) override;
		void video_render(gs_effect_t* effect) override;

		private:
		// Queue an asynchronous switch to 'provider'.
		void switch_provider(denoising_provider provider);
		// Threadpool task that performs the unload/load of providers.
		void task_switch_provider(util::threadpool::task_data_t data);

#ifdef ENABLE_NVIDIA
		void nvvfx_denoising_load();
		void nvvfx_denoising_unload();
		void nvvfx_denoising_size();
		void nvvfx_denoising_process();
		void nvvfx_denoising_properties(obs_properties_t* props);
		void nvvfx_denoising_update(obs_data_t* data);
#endif
	};

	// Factory registering the denoising filter; tracks which provider SDKs
	// loaded successfully.
	class denoising_factory : public obs::source_factory<::streamfx::filter::denoising::denoising_factory, ::streamfx::filter::denoising::denoising_instance> {
#ifdef ENABLE_NVIDIA
		bool _nvidia_available;
		std::shared_ptr<::streamfx::nvidia::cuda::obs> _nvcuda;
		std::shared_ptr<::streamfx::nvidia::cv::cv> _nvcvi;
		std::shared_ptr<::streamfx::nvidia::vfx::vfx> _nvvfx;
#endif

		public:
		virtual ~denoising_factory();
		denoising_factory();

		virtual const char* get_name() override;

		virtual void get_defaults2(obs_data_t* data) override;

		virtual obs_properties_t* get_properties2(denoising_instance* data) override;

		// Button callback that opens the online manual.
		static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);

		// True if the given provider's SDK loaded successfully.
		bool is_provider_available(denoising_provider);

		// First available provider from the priority list, else AUTOMATIC.
		denoising_provider find_ideal_provider();

		public: // Singleton
		static void initialize();
		static void finalize();
		static std::shared_ptr<::streamfx::filter::denoising::denoising_factory> instance();
	};
} // namespace streamfx::filter::denoising

View File

@ -0,0 +1,9 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
cmake_minimum_required(VERSION 3.26)
project("DynamicMask")
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

# Register this directory as the "Dynamic Mask" StreamFX component.
streamfx_add_component("Dynamic Mask")

View File

@ -0,0 +1,845 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "filter-dynamic-mask.hpp"
#include "strings.hpp"
#include "obs/gs/gs-helper.hpp"
#include "util/util-logging.hpp"
#include "warning-disable.hpp"
#include <array>
#include <sstream>
#include <stdexcept>
#include <vector>
#include "warning-enable.hpp"
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
#define ST_PREFIX "<filter::dynamic_mask> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
// Filter to allow dynamic masking
// Allow any channel to affect any other channel
//
// Red/Green/Blue/Alpha Mask Input
// - Red Mask Output
// - Blue Mask Output
// - Green Mask Output
// - Alpha Mask Output
#define ST_I18N "Filter.DynamicMask"
#define ST_I18N_INPUT "Filter.DynamicMask.Input"
#define ST_KEY_INPUT "Filter.DynamicMask.Input"
#define ST_I18N_CHANNEL "Filter.DynamicMask.Channel"
#define ST_KEY_CHANNEL "Filter.DynamicMask.Channel"
#define ST_I18N_CHANNEL_VALUE "Filter.DynamicMask.Channel.Value"
#define ST_KEY_CHANNEL_VALUE "Filter.DynamicMask.Channel.Value"
#define ST_I18N_CHANNEL_MULTIPLIER "Filter.DynamicMask.Channel.Multiplier"
#define ST_KEY_CHANNEL_MULTIPLIER "Filter.DynamicMask.Channel.Multiplier"
#define ST_I18N_CHANNEL_INPUT "Filter.DynamicMask.Channel.Input"
#define ST_KEY_CHANNEL_INPUT "Filter.DynamicMask.Channel.Input"
#define ST_KEY_DEBUG_TEXTURE "Debug.Texture"
#define ST_I18N_DEBUG_TEXTURE ST_I18N ".Debug.Texture"
#define ST_I18N_DEBUG_TEXTURE_BASE ST_I18N_DEBUG_TEXTURE ".Base"
#define ST_I18N_DEBUG_TEXTURE_INPUT ST_I18N_DEBUG_TEXTURE ".Input"
using namespace streamfx::filter::dynamic_mask;
static constexpr std::string_view HELP_URL = "https://github.com/Xaymar/obs-StreamFX/wiki/Filter-Dynamic-Mask";
static std::pair<channel, const char*> channel_translations[] = {
{channel::Red, S_CHANNEL_RED},
{channel::Green, S_CHANNEL_GREEN},
{channel::Blue, S_CHANNEL_BLUE},
{channel::Alpha, S_CHANNEL_ALPHA},
};
data::data()
{
	// Acquire a graphics context before creating the effect.
	auto gctx        = streamfx::obs::gs::context();
	_channel_mask_fx = streamfx::obs::gs::effect::create(streamfx::data_file_path("effects/channel-mask.effect"));
}

data::~data() {}

// Shared channel-mask effect, loaded once for all filter instances.
streamfx::obs::gs::effect data::channel_mask_fx()
{
	return _channel_mask_fx;
}

// Returns the process-wide shared data object, creating it on first use.
// Guarded by a mutex; the object is destroyed when the last owner releases it.
std::shared_ptr<streamfx::filter::dynamic_mask::data> data::get()
{
	static std::mutex                                          instance_lock;
	static std::weak_ptr<streamfx::filter::dynamic_mask::data> weak_instance;

	std::lock_guard<std::mutex> lock(instance_lock);
	auto                        instance = weak_instance.lock();
	if (!instance) {
		instance      = std::shared_ptr<streamfx::filter::dynamic_mask::data>{new streamfx::filter::dynamic_mask::data()};
		weak_instance = instance;
	}
	return instance;
}
// Initializes all members to safe defaults, then applies the saved settings.
dynamic_mask_instance::dynamic_mask_instance(obs_data_t* settings, obs_source_t* self)
	: obs::source_instance(settings, self), //
	  _data(streamfx::filter::dynamic_mask::data::get()), //
	  _gfx_util(::streamfx::gfx::util::get()), //
	  _translation_map(), //
	  _input(), //
	  _input_child(), //
	  _input_vs(), //
	  _input_ac(), //
	  _have_base(false), //
	  _base_rt(), //
	  _base_tex(), //
	  _base_color_space(GS_CS_SRGB), //
	  _base_color_format(GS_RGBA), //
	  _have_input(false), //
	  _input_rt(), //
	  _input_tex(), //
	  _input_color_space(GS_CS_SRGB), //
	  _input_color_format(GS_RGBA), //
	  _have_final(false), //
	  _final_rt(), //
	  _final_tex(), //
	  _channels(), //
	  _precalc(), //
	  _debug_texture(-1) //
{
	update(settings);
}

// Drops the input source reference (and its activity/visibility tracking).
dynamic_mask_instance::~dynamic_mask_instance()
{
	release();
}

// Loading is identical to a settings update.
void dynamic_mask_instance::load(obs_data_t* settings)
{
	update(settings);
}
// No settings migrations have been necessary so far.
void dynamic_mask_instance::migrate(obs_data_t* data, uint64_t version) {}

// Reads all settings and precalculates the shader parameters used by
// video_render(): per-channel base value, per-channel multiplier, and the
// 4x4 input-to-output channel mixing matrix.
void dynamic_mask_instance::update(obs_data_t* settings)
{
	// Update source.
	if (const char* v = obs_data_get_string(settings, ST_KEY_INPUT); (v != nullptr) && (v[0] != '\0')) {
		if (!acquire(v))
			DLOG_ERROR("Failed to acquire Input source '%s'.", v);
	} else {
		release();
	}

	// Update data store
	for (auto kv1 : channel_translations) {
		// Ensure a channel_data entry exists for this output channel.
		auto found = _channels.find(kv1.first);
		if (found == _channels.end()) {
			_channels.insert({kv1.first, channel_data()});
			found = _channels.find(kv1.first);
			if (found == _channels.end()) {
				assert(found != _channels.end());
				throw std::runtime_error("Unable to insert element into data _store.");
			}
		}

		// Base value added to this output channel.
		std::string chv_key = std::string(ST_KEY_CHANNEL_VALUE) + "." + kv1.second;
		found->second.value = static_cast<float>(obs_data_get_double(settings, chv_key.c_str()));
		_precalc.base.ptr[static_cast<size_t>(kv1.first)] = found->second.value;

		// Multiplier applied to this output channel.
		std::string chm_key = std::string(ST_KEY_CHANNEL_MULTIPLIER) + "." + kv1.second;
		found->second.scale = static_cast<float>(obs_data_get_double(settings, chm_key.c_str()));
		_precalc.scale.ptr[static_cast<size_t>(kv1.first)] = found->second.scale;

		// Pick the matrix row corresponding to this output channel.
		vec4* ch = &_precalc.matrix.x;
		switch (kv1.first) {
		case channel::Red:
			ch = &_precalc.matrix.x;
			break;
		case channel::Green:
			ch = &_precalc.matrix.y;
			break;
		case channel::Blue:
			ch = &_precalc.matrix.z;
			break;
		case channel::Alpha:
			ch = &_precalc.matrix.t;
			break;
		default:
			break;
		}

		// Contribution of each input channel to this output channel.
		for (auto kv2 : channel_translations) {
			std::string ab_key = std::string(ST_KEY_CHANNEL_INPUT) + "." + kv1.second + "." + kv2.second;
			found->second.values.ptr[static_cast<size_t>(kv2.first)] = static_cast<float>(obs_data_get_double(settings, ab_key.c_str()));
			ch->ptr[static_cast<size_t>(kv2.first)] = found->second.values.ptr[static_cast<size_t>(kv2.first)];
		}
	}

	// Debug texture selection (-1 = disabled, 0 = base, 1 = input).
	_debug_texture = obs_data_get_int(settings, ST_KEY_DEBUG_TEXTURE);
}
// Writes the current in-memory state back into 'settings'; mirrors update().
void dynamic_mask_instance::save(obs_data_t* settings)
{
	if (auto source = _input.lock(); source) {
		obs_data_set_string(settings, ST_KEY_INPUT, source.name().data());
	}

	for (auto kv1 : channel_translations) {
		// Ensure a channel_data entry exists, same as in update().
		auto found = _channels.find(kv1.first);
		if (found == _channels.end()) {
			_channels.insert({kv1.first, channel_data()});
			found = _channels.find(kv1.first);
			if (found == _channels.end()) {
				assert(found != _channels.end());
				throw std::runtime_error("Unable to insert element into data _store.");
			}
		}

		std::string chv_key = std::string(ST_KEY_CHANNEL_VALUE) + "." + kv1.second;
		obs_data_set_double(settings, chv_key.c_str(), static_cast<double_t>(found->second.value));

		std::string chm_key = std::string(ST_KEY_CHANNEL_MULTIPLIER) + "." + kv1.second;
		obs_data_set_double(settings, chm_key.c_str(), static_cast<double_t>(found->second.scale));

		for (auto kv2 : channel_translations) {
			std::string ab_key = std::string(ST_KEY_CHANNEL_INPUT) + "." + kv1.second + "." + kv2.second;
			obs_data_set_double(settings, ab_key.c_str(), static_cast<double_t>(found->second.values.ptr[static_cast<size_t>(kv2.first)]));
		}
	}
}
// Reports the color space determined in video_tick() from the filter target.
// NOTE(review): 'count' and 'preferred_spaces' are intentionally ignored here.
gs_color_space dynamic_mask_instance::video_get_color_space(size_t count, const gs_color_space* preferred_spaces)
{
	return _base_color_space;
}
// Determines, once per frame, the color space / render-target format / sRGB
// handling for the base (filter target) and the mask input source, and marks
// all cached textures as stale so video_render() re-captures them.
void dynamic_mask_instance::video_tick(float time)
{
	{ // Base Information
		_have_base = false;

		// Query the target's color space and derive a matching texture format.
		std::array<gs_color_space, 1> preferred_formats = {GS_CS_SRGB};
		_base_color_space = obs_source_get_color_space(obs_filter_get_target(_self), preferred_formats.size(), preferred_formats.data());
		switch (_base_color_space) {
		case GS_CS_SRGB:
			_base_color_format = GS_RGBA;
			break;
		case GS_CS_SRGB_16F:
		case GS_CS_709_EXTENDED:
		case GS_CS_709_SCRGB:
			_base_color_format = GS_RGBA16F;
			break;
		default:
			_base_color_format = GS_RGBA_UNORM;
		}

		// Only treat the base as sRGB when the target advertises sRGB support.
		if ((obs_source_get_output_flags(obs_filter_get_target(_self)) & OBS_SOURCE_SRGB) == OBS_SOURCE_SRGB) {
			_base_srgb = (_base_color_space <= GS_CS_SRGB_16F);
		} else {
			_base_srgb = false;
		}
	}

	if (auto input = _input.lock(); input) { // Input Information
		_have_input = false;
		std::array<gs_color_space, 1> preferred_formats = {GS_CS_SRGB};
		_input_color_space = obs_source_get_color_space(input, preferred_formats.size(), preferred_formats.data());
		switch (_input_color_space) {
		case GS_CS_SRGB:
			_input_color_format = GS_RGBA;
			break;
		case GS_CS_SRGB_16F:
		case GS_CS_709_EXTENDED:
		case GS_CS_709_SCRGB:
			_input_color_format = GS_RGBA16F;
			break;
		default:
			_input_color_format = GS_RGBA_UNORM;
		}

		if ((input.output_flags() & OBS_SOURCE_SRGB) == OBS_SOURCE_SRGB) {
			// Fixed: this previously tested _base_color_space (copy/paste from
			// the base block above); the input's sRGB flag must be derived from
			// the input's own color space.
			_input_srgb = (_input_color_space <= GS_CS_SRGB_16F);
		} else {
			_input_srgb = false;
		}
	} else {
		_have_input = false;
	}

	_have_final = false;
	_final_srgb = _base_srgb;
}
// Renders the filter in three passes: (1) capture the filter target ("base")
// into _base_rt, (2) capture the selected mask input into _input_rt (falling
// back to the base when no input is selected), (3) combine both through the
// channel-mask effect into _final_rt, then draw the result.
void dynamic_mask_instance::video_render(gs_effect_t* in_effect)
{
	gs_effect_t*  default_effect = obs_get_base_effect(obs_base_effect::OBS_EFFECT_DEFAULT);
	auto          effect         = _data->channel_mask_fx();
	obs_source_t* parent         = obs_filter_get_parent(_self);
	obs_source_t* target         = obs_filter_get_target(_self);
	uint32_t      width          = obs_source_get_base_width(target);
	uint32_t      height         = obs_source_get_base_height(target);
	auto          input          = _input.lock();

#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
	streamfx::obs::gs::debug_marker gdmp{streamfx::obs::gs::debug_color_source, "Dynamic Mask '%s' on '%s'", obs_source_get_name(_self), obs_source_get_name(obs_filter_get_parent(_self))};
#endif

	// If there's some issue acquiring information, skip rendering entirely.
	if (!_self || !parent || !target || !width || !height) {
		_self.skip_video_filter();
		return;
	} else if (input && (!input.width() || !input.height())) {
		_self.skip_video_filter();
		return;
	}

	// Capture the base texture for later rendering.
	if (!_have_base) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_cache, "Base Texture"};
#endif

		// Ensure the Render Target matches the expected format.
		if (!_base_rt || (_base_rt->color_format() != _base_color_format)) {
			_base_rt = std::make_shared<streamfx::obs::gs::texrender>(_base_color_format, GS_ZS_NONE);
		}

		// Save sRGB state so it can be restored after this pass.
		bool previous_srgb  = gs_framebuffer_srgb_enabled();
		auto previous_lsrgb = gs_get_linear_srgb();
		gs_set_linear_srgb(_base_srgb);
		gs_enable_framebuffer_srgb(false);

		// Begin rendering the source with a certain color space.
		if (obs_source_process_filter_begin_with_color_space(_self, _base_color_format, _base_color_space, OBS_ALLOW_DIRECT_RENDERING)) {
			try {
				{
					auto op = _base_rt->render(width, height, _base_color_space);

					// Push a new blend state to stack.
					gs_blend_state_push();
					gs_reset_blend_state();
					gs_enable_blending(false);
					gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
					try {
						// Enable all channels.
						gs_enable_color(true, true, true, true);

						// Disable culling.
						gs_set_cull_mode(GS_NEITHER);

						// Disable depth testing.
						gs_enable_depth_test(false);
						gs_depth_function(GS_ALWAYS);

						// Disable stencil testing
						gs_enable_stencil_test(false);
						gs_enable_stencil_write(false);
						gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
						gs_stencil_op(GS_STENCIL_BOTH, GS_KEEP, GS_KEEP, GS_KEEP);

						// Set up rendering matrix.
						gs_ortho(0, static_cast<float>(width), 0, static_cast<float>(height), -1., 1.);

						{ // Clear to black.
							vec4 clr = {0., 0., 0., 0.};
							gs_clear(GS_CLEAR_COLOR, &clr, 0., 0);
						}

						// Render the source.
						_self.process_filter_end(default_effect, width, height);

						// Pop the old blend state.
						gs_blend_state_pop();
					} catch (...) {
						gs_blend_state_pop();
						throw;
					}
				}

				_have_base = true;
				_base_rt->get_texture(_base_tex);
			} catch (const std::exception& ex) {
				// End the filter pass even on failure to keep OBS state balanced.
				_self.process_filter_end(default_effect, width, height);
				DLOG_ERROR("Failed to capture base texture: %s", ex.what());
			} catch (...) {
				_self.process_filter_end(default_effect, width, height);
				DLOG_ERROR("Failed to capture base texture.", nullptr);
			}
		}

		gs_set_linear_srgb(previous_lsrgb);
		gs_enable_framebuffer_srgb(previous_srgb);
	}

	// Capture the input texture for later rendering.
	if (!_have_input) {
		if (!input) {
			// Treat no selection as selecting the target filter.
			_have_input         = _have_base;
			_input_tex          = _base_tex;
			_input_color_format = _base_color_format;
			_input_color_space  = _base_color_space;
		} else {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
			streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_source, "Input '%s'", input.name().data()};
#endif

			// Ensure the Render Target matches the expected format.
			if (!_input_rt || (_input_rt->color_format() != _input_color_format)) {
				_input_rt = std::make_shared<streamfx::obs::gs::texrender>(_input_color_format, GS_ZS_NONE);
			}

			auto previous_lsrgb = gs_get_linear_srgb();
			gs_set_linear_srgb(_input_srgb);
			bool previous_srgb = gs_framebuffer_srgb_enabled();
			gs_enable_framebuffer_srgb(false);

			try {
				{
					auto op = _input_rt->render(input.width(), input.height(), _input_color_space);

					// Push a new blend state to stack.
					gs_blend_state_push();
					gs_reset_blend_state();
					gs_enable_blending(false);
					gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
					try {
						// Enable all channels.
						gs_enable_color(true, true, true, true);

						// Disable culling.
						gs_set_cull_mode(GS_NEITHER);

						// Disable depth testing.
						gs_enable_depth_test(false);
						gs_depth_function(GS_ALWAYS);

						// Disable stencil testing
						gs_enable_stencil_test(false);
						gs_enable_stencil_write(false);
						gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
						gs_stencil_op(GS_STENCIL_BOTH, GS_KEEP, GS_KEEP, GS_KEEP);

						// Set up rendering matrix.
						gs_ortho(0, static_cast<float>(input.width()), 0, static_cast<float>(input.height()), -1., 1.);

						{ // Clear to black.
							vec4 clr = {0., 0., 0., 0.};
							gs_clear(GS_CLEAR_COLOR, &clr, 0., 0);
						}

						// Render the source.
						obs_source_video_render(input);

						// Pop the old blend state.
						gs_blend_state_pop();
					} catch (...) {
						gs_blend_state_pop();
						throw;
					}
				}

				_have_input = true;
				_input_rt->get_texture(_input_tex);
			} catch (const std::exception& ex) {
				DLOG_ERROR("Failed to capture input texture: %s", ex.what());
			} catch (...) {
				DLOG_ERROR("Failed to capture input texture.", nullptr);
			}

			gs_enable_framebuffer_srgb(previous_srgb);
			gs_set_linear_srgb(previous_lsrgb);
		}
	}

	// Capture the final texture.
	if (!_have_final && _have_base) {
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_render, "Final Calculation"};
#endif

		// Ensure the Render Target matches the expected format.
		if (!_final_rt || (_final_rt->color_format() != _base_color_format)) {
			_final_rt = std::make_shared<streamfx::obs::gs::texrender>(_base_color_format, GS_ZS_NONE);
		}

		bool previous_srgb  = gs_framebuffer_srgb_enabled();
		auto previous_lsrgb = gs_get_linear_srgb();
		gs_enable_framebuffer_srgb(_final_srgb);
		gs_set_linear_srgb(_final_srgb);

		try {
			{
				auto op = _final_rt->render(width, height);

				// Push a new blend state to stack.
				gs_blend_state_push();
				gs_reset_blend_state();
				gs_enable_blending(false);
				gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);
				try {
					// Enable all channels.
					gs_enable_color(true, true, true, true);

					// Disable culling.
					gs_set_cull_mode(GS_NEITHER);

					// Disable depth testing.
					gs_enable_depth_test(false);
					gs_depth_function(GS_ALWAYS);

					// Disable stencil testing
					gs_enable_stencil_test(false);
					gs_enable_stencil_write(false);
					gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
					gs_stencil_op(GS_STENCIL_BOTH, GS_KEEP, GS_KEEP, GS_KEEP);

					// Set up rendering matrix.
					gs_ortho(0, 1, 0, 1, -1., 1.);

					{ // Clear to black.
						vec4 clr = {0., 0., 0., 0.};
						gs_clear(GS_CLEAR_COLOR, &clr, 0., 0);
					}

					// Feed both captured textures and the precalculated mask
					// parameters (from update()) into the channel-mask effect.
					effect.get_parameter("pMaskInputA").set_texture(_base_tex, _base_srgb);
					effect.get_parameter("pMaskInputB").set_texture(_input_tex, _input_srgb);
					effect.get_parameter("pMaskBase").set_float4(_precalc.base);
					effect.get_parameter("pMaskMatrix").set_matrix(_precalc.matrix);
					effect.get_parameter("pMaskMultiplier").set_float4(_precalc.scale);

					while (gs_effect_loop(effect.get(), "Mask")) {
						_gfx_util->draw_fullscreen_triangle();
					}

					// Pop the old blend state.
					gs_blend_state_pop();
				} catch (...) {
					gs_blend_state_pop();
					throw;
				}
			}

			_final_tex  = _final_rt->get_texture();
			_have_final = true;
		} catch (const std::exception& ex) {
			DLOG_ERROR("Failed to render final texture: %s", ex.what());
		} catch (...) {
			DLOG_ERROR("Failed to render final texture.", nullptr);
		}

		gs_set_linear_srgb(previous_lsrgb);
		gs_enable_framebuffer_srgb(previous_srgb);
	}

	// Enable texture debugging: replace the final texture with an
	// intermediate one when selected in the UI.
	switch (_debug_texture) {
	case 0:
		_have_final = _have_base;
		_final_tex  = _base_tex;
		break;
	case 1:
		_have_final = _have_input;
		_final_tex  = _input_tex;
		break;
	}

	// Abort if we don't have a final render.
	if (!_have_final || !_final_tex->get_object()) {
		obs_source_skip_video_filter(_self);
		return;
	}

	// Draw source
	{
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
		streamfx::obs::gs::debug_marker gdm{streamfx::obs::gs::debug_color_render, "Render"};
#endif

		// It is important that we do not modify the blend state here, as it is set correctly by OBS
		gs_set_cull_mode(GS_NEITHER);
		gs_enable_color(true, true, true, true);
		gs_enable_depth_test(false);
		gs_depth_function(GS_ALWAYS);
		gs_enable_stencil_test(false);
		gs_enable_stencil_write(false);
		gs_stencil_function(GS_STENCIL_BOTH, GS_ALWAYS);
		gs_stencil_op(GS_STENCIL_BOTH, GS_ZERO, GS_ZERO, GS_ZERO);

		const bool previous_srgb = gs_framebuffer_srgb_enabled();
		gs_enable_framebuffer_srgb(gs_get_linear_srgb());

		gs_effect_t* final_effect = in_effect ? in_effect : default_effect;
		gs_eparam_t* param        = gs_effect_get_param_by_name(final_effect, "image");
		if (!param) {
			DLOG_ERROR("<filter-dynamic-mask:%s> Failed to set image param.", obs_source_get_name(_self));
			gs_enable_framebuffer_srgb(previous_srgb);
			obs_source_skip_video_filter(_self);
			return;
		} else {
			if (gs_get_linear_srgb()) {
				gs_effect_set_texture_srgb(param, *_final_tex);
			} else {
				gs_effect_set_texture(param, *_final_tex);
			}
		}
		while (gs_effect_loop(final_effect, "Draw")) {
			gs_draw_sprite(0, 0, width, height);
		}

		gs_enable_framebuffer_srgb(previous_srgb);
	}
}
// Reports the mask input to OBS so reference counting for child sources works.
void dynamic_mask_instance::enum_active_sources(obs_source_enum_proc_t enum_callback, void* param)
{
	if (_input)
		enum_callback(_self, _input.lock().get(), param);
}

void dynamic_mask_instance::enum_all_sources(obs_source_enum_proc_t enum_callback, void* param)
{
	if (_input)
		enum_callback(_self, _input.lock().get(), param);
}

// Holds a "showing" reference on the input while this filter (and its parent)
// is visible, so the input keeps rendering.
void streamfx::filter::dynamic_mask::dynamic_mask_instance::show()
{
	if (!_input || !_self.showing() || !(_self.get_filter_parent().showing()))
		return;

	auto input = _input.lock();
	_input_vs  = streamfx::obs::source_showing_reference::add_showing_reference(input);
}

void streamfx::filter::dynamic_mask::dynamic_mask_instance::hide()
{
	_input_vs.reset();
}

// Holds an "active" reference on the input while this filter (and its parent)
// is active in the output.
void streamfx::filter::dynamic_mask::dynamic_mask_instance::activate()
{
	if (!_input || !_self.active() || !(_self.get_filter_parent().active()))
		return;

	auto input = _input.lock();
	_input_ac  = streamfx::obs::source_active_reference::add_active_reference(input);
}

void streamfx::filter::dynamic_mask::dynamic_mask_instance::deactivate()
{
	_input_ac.reset();
}
// Attempts to select the source named 'name' as mask input. Returns false
// (and clears any previous input) when the source cannot be acquired or
// selecting it would create a recursive source chain.
bool dynamic_mask_instance::acquire(std::string_view name)
{
	try {
		// Try and acquire the source.
		_input = streamfx::obs::weak_source(name);

		// Ensure that this wouldn't cause recursion.
		_input_child = std::make_unique<streamfx::obs::source_active_child>(_self, _input.lock());

		// Handle the active and showing stuff.
		activate();
		show();

		return true;
	} catch (...) {
		release();
		return false;
	}
}

// Drops the mask input and all tracking references attached to it.
void dynamic_mask_instance::release()
{
	// Handle the active and showing stuff.
	deactivate();
	hide();

	// Release any references.
	_input_child.reset();
	_input.reset();
}
dynamic_mask_factory::dynamic_mask_factory()
{
	_info.id           = S_PREFIX "filter-dynamic-mask";
	_info.type         = OBS_SOURCE_TYPE_FILTER;
	_info.output_flags = OBS_SOURCE_VIDEO | OBS_SOURCE_SRGB;

	support_active_child_sources(true);
	support_child_sources(true);
	support_size(false);
	support_activity_tracking(true);
	support_visibility_tracking(true);
	support_color_space(true);
	finish_setup();

	// Old source id kept so scene collections made with the previous plugin
	// name keep loading.
	register_proxy("obs-stream-effects-filter-dynamic-mask");
}

dynamic_mask_factory::~dynamic_mask_factory() {}

const char* dynamic_mask_factory::get_name()
{
	return D_TRANSLATE(ST_I18N);
}
// Seeds the default settings: identity mask (value 1.0, multiplier 1.0, all
// cross-channel contributions 0.0) and debug view disabled.
void dynamic_mask_factory::get_defaults2(obs_data_t* data)
{
	// The primary channel selector defaults to Red.
	obs_data_set_default_int(data, ST_KEY_CHANNEL, static_cast<int64_t>(channel::Red));

	for (const auto& primary : channel_translations) {
		const std::string suffix = std::string(".") + primary.second;

		obs_data_set_default_double(data, (ST_KEY_CHANNEL_VALUE + suffix).c_str(), 1.0);
		obs_data_set_default_double(data, (ST_KEY_CHANNEL_MULTIPLIER + suffix).c_str(), 1.0);

		for (const auto& secondary : channel_translations) {
			obs_data_set_default_double(data, (ST_KEY_CHANNEL_INPUT + suffix + "." + secondary.second).c_str(), 0.0);
		}
	}

	obs_data_set_default_int(data, ST_KEY_DEBUG_TEXTURE, -1);
}
// Builds the property sheet: manual button, input source selector, one group
// of sliders per output channel, and an advanced debug-texture selector.
obs_properties_t* dynamic_mask_factory::get_properties2(dynamic_mask_instance* data)
{
	obs_properties_t* props = obs_properties_create();
	obs_property_t*   p;

	// Labels are produced by translate_string(); cache the strings so the
	// const char* pointers handed to OBS stay valid while the sheet exists.
	_translation_cache.clear();

	{
		obs_properties_add_button2(props, S_MANUAL_OPEN, D_TRANSLATE(S_MANUAL_OPEN), streamfx::filter::dynamic_mask::dynamic_mask_factory::on_manual_open, nullptr);
	}

	{ // Input
		p = obs_properties_add_list(props, ST_KEY_INPUT, D_TRANSLATE(ST_I18N_INPUT), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		obs_property_list_add_string(p, "", "");

		// Offer all tracked video sources and scenes as mask inputs.
		obs::source_tracker::instance()->enumerate(
			[&p](std::string name, ::streamfx::obs::source) {
				std::stringstream sstr;
				sstr << name << " (" << D_TRANSLATE(S_SOURCETYPE_SOURCE) << ")";
				obs_property_list_add_string(p, sstr.str().c_str(), name.c_str());
				return false;
			},
			obs::source_tracker::filter_video_sources);
		obs::source_tracker::instance()->enumerate(
			[&p](std::string name, ::streamfx::obs::source) {
				std::stringstream sstr;
				sstr << name << " (" << D_TRANSLATE(S_SOURCETYPE_SCENE) << ")";
				obs_property_list_add_string(p, sstr.str().c_str(), name.c_str());
				return false;
			},
			obs::source_tracker::filter_scenes);
	}

	// One collapsible group per output channel.
	const char* pri_chs[] = {S_CHANNEL_RED, S_CHANNEL_GREEN, S_CHANNEL_BLUE, S_CHANNEL_ALPHA};
	for (auto pri_ch : pri_chs) {
		auto grp = obs_properties_create();

		{ // Base value slider.
			_translation_cache.push_back(translate_string(D_TRANSLATE(ST_I18N_CHANNEL_VALUE), D_TRANSLATE(pri_ch)));
			std::string buf = std::string(ST_KEY_CHANNEL_VALUE) + "." + pri_ch;
			p = obs_properties_add_float_slider(grp, buf.c_str(), _translation_cache.back().c_str(), -100.0, 100.0, 0.01);
			obs_property_set_long_description(p, _translation_cache.back().c_str());
		}

		// Per-input-channel contribution sliders.
		const char* sec_chs[] = {S_CHANNEL_RED, S_CHANNEL_GREEN, S_CHANNEL_BLUE, S_CHANNEL_ALPHA};
		for (auto sec_ch : sec_chs) {
			_translation_cache.push_back(translate_string(D_TRANSLATE(ST_I18N_CHANNEL_INPUT), D_TRANSLATE(sec_ch)));
			std::string buf = std::string(ST_KEY_CHANNEL_INPUT) + "." + pri_ch + "." + sec_ch;
			p = obs_properties_add_float_slider(grp, buf.c_str(), _translation_cache.back().c_str(), -100.0, 100.0, 0.01);
			obs_property_set_long_description(p, _translation_cache.back().c_str());
		}

		{ // Multiplier slider.
			_translation_cache.push_back(translate_string(D_TRANSLATE(ST_I18N_CHANNEL_MULTIPLIER), D_TRANSLATE(pri_ch)));
			std::string buf = std::string(ST_KEY_CHANNEL_MULTIPLIER) + "." + pri_ch;
			p = obs_properties_add_float_slider(grp, buf.c_str(), _translation_cache.back().c_str(), -100.0, 100.0, 0.01);
			obs_property_set_long_description(p, _translation_cache.back().c_str());
		}

		{
			_translation_cache.push_back(translate_string(D_TRANSLATE(ST_I18N_CHANNEL), D_TRANSLATE(pri_ch)));
			std::string buf = std::string(ST_KEY_CHANNEL) + "." + pri_ch;
			obs_properties_add_group(props, buf.c_str(), _translation_cache.back().c_str(), obs_group_type::OBS_GROUP_NORMAL, grp);
		}
	}

	{ // Advanced/debug options.
		auto grp = obs_properties_create();
		obs_properties_add_group(props, "Debug", D_TRANSLATE(S_ADVANCED), OBS_GROUP_NORMAL, grp);

		{
			auto p = obs_properties_add_list(grp, ST_KEY_DEBUG_TEXTURE, D_TRANSLATE(ST_I18N_DEBUG_TEXTURE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
			obs_property_list_add_int(p, D_TRANSLATE(S_STATE_DISABLED), -1);
			obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_DEBUG_TEXTURE_BASE), 0);
			obs_property_list_add_int(p, D_TRANSLATE(ST_I18N_DEBUG_TEXTURE_INPUT), 1);
		}
	}

	return props;
}
// printf-style helper used to build translated property labels.
// Fixed: the previous version cast a possibly-negative vsnprintf() result to
// size_t (undefined-length string on encoding error) and silently truncated
// any result longer than its fixed 2048-byte buffer. Now measures first, then
// formats into an exactly-sized buffer.
std::string dynamic_mask_factory::translate_string(const char* format, ...)
{
	va_list vargs;
	va_start(vargs, format);
	va_list vargs_copy;
	va_copy(vargs_copy, vargs); // The list is consumed twice: measure + format.

	// First pass: determine the required length.
	int needed = vsnprintf(nullptr, 0, format, vargs);
	va_end(vargs);
	if (needed < 0) { // Encoding error — return an empty label instead of garbage.
		va_end(vargs_copy);
		return std::string();
	}

	// Second pass: format into an exactly-sized buffer (+1 for the terminator).
	std::vector<char> buffer(static_cast<size_t>(needed) + 1);
	vsnprintf(buffer.data(), buffer.size(), format, vargs_copy);
	va_end(vargs_copy);

	return std::string(buffer.data(), static_cast<size_t>(needed));
}
// Button callback: opens the online manual. Always returns false, meaning the
// property layout does not need to be refreshed.
bool dynamic_mask_factory::on_manual_open(obs_properties_t* props, obs_property_t* property, void* data)
{
	try {
		streamfx::open_url(HELP_URL);
		return false;
	} catch (const std::exception& ex) {
		D_LOG_ERROR("Failed to open manual due to error: %s", ex.what());
		return false;
	} catch (...) {
		D_LOG_ERROR("Failed to open manual due to unknown error.", "");
		return false;
	}
}
std::shared_ptr<dynamic_mask_factory> dynamic_mask_factory::instance()
{
static std::weak_ptr<dynamic_mask_factory> winst;
static std::mutex mtx;
std::unique_lock<decltype(mtx)> lock(mtx);
auto instance = winst.lock();
if (!instance) {
instance = std::shared_ptr<dynamic_mask_factory>(new dynamic_mask_factory());
winst = instance;
}
return instance;
}
// Keeps the factory alive between component initialization and finalization.
static std::shared_ptr<dynamic_mask_factory> loader_instance;

// Registers the "dynamic_mask" component with the StreamFX loader; the listed
// core components are initialized before this one.
static auto loader = streamfx::component(
	"dynamic_mask",
	[]() { // Initializer
		loader_instance = dynamic_mask_factory::instance();
	},
	[]() { // Finalizer
		loader_instance.reset();
	},
	{"core::source_tracker", "core::gs::texrender", "core::gs::texture", "core::gs::sampler"});

View File

@ -0,0 +1,136 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2019-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx/gfx-source-texture.hpp"
#include "gfx/gfx-util.hpp"
#include "obs/gs/gs-effect.hpp"
#include "obs/obs-source-active-child.hpp"
#include "obs/obs-source-active-reference.hpp"
#include "obs/obs-source-factory.hpp"
#include "obs/obs-source-showing-reference.hpp"
#include "obs/obs-source-tracker.hpp"
#include "obs/obs-source.hpp"
#include "obs/obs-tools.hpp"
#include "warning-disable.hpp"
#include <list>
#include <map>
#include "warning-enable.hpp"
namespace streamfx::filter::dynamic_mask {
	// Mask channels; the numeric value doubles as an index into vec4/matrix rows.
	enum class channel : int8_t { Invalid = -1, Red, Green, Blue, Alpha };

	// Data shared by all dynamic-mask instances: the channel-mask effect is
	// compiled once and handed out through a weak singleton.
	class data {
		streamfx::obs::gs::effect _channel_mask_fx;

		private:
		data();

		public:
		~data();

		streamfx::obs::gs::effect channel_mask_fx();

		public:
		static std::shared_ptr<streamfx::filter::dynamic_mask::data> get();
	};

	class dynamic_mask_instance : public obs::source_instance {
		std::shared_ptr<streamfx::filter::dynamic_mask::data> _data;
		std::shared_ptr<streamfx::gfx::util>                  _gfx_util;

		std::map<std::tuple<channel, channel, std::string>, std::string> _translation_map;

		// The source used as mask input, plus helpers tracking recursion,
		// visibility ("showing") and activity state of that source.
		streamfx::obs::weak_source                               _input;
		std::unique_ptr<streamfx::obs::source_active_child>      _input_child;
		std::shared_ptr<streamfx::obs::source_showing_reference> _input_vs;
		std::shared_ptr<streamfx::obs::source_active_reference>  _input_ac;

		// Base texture for filtering
		bool                                          _have_base;
		std::shared_ptr<streamfx::obs::gs::texrender> _base_rt;
		std::shared_ptr<streamfx::obs::gs::texture>   _base_tex;
		gs_color_space                                _base_color_space;
		gs_color_format                               _base_color_format;
		bool                                          _base_srgb;

		// Captured mask input texture.
		bool                                          _have_input;
		std::shared_ptr<streamfx::obs::gs::texrender> _input_rt;
		std::shared_ptr<streamfx::obs::gs::texture>   _input_tex;
		gs_color_space                                _input_color_space;
		gs_color_format                               _input_color_format;
		bool                                          _input_srgb;

		// Composited result of applying the mask to the base.
		bool                                          _have_final;
		std::shared_ptr<streamfx::obs::gs::texrender> _final_rt;
		std::shared_ptr<streamfx::obs::gs::texture>   _final_tex;
		bool                                          _final_srgb;

		// Debug view selector: -1 = disabled, 0 = base texture, 1 = input texture.
		int64_t _debug_texture;

		// Per-output-channel settings as read from the UI.
		struct channel_data {
			float value  = 0.0;
			float scale  = 1.0;
			vec4  values = {0, 0, 0, 0};
		};
		std::map<channel, channel_data> _channels;

		// Shader parameters precalculated in update().
		struct _precalc {
			vec4    base;
			vec4    scale;
			matrix4 matrix;
		} _precalc;

		public:
		dynamic_mask_instance(obs_data_t* data, obs_source_t* self);
		virtual ~dynamic_mask_instance();

		virtual void load(obs_data_t* settings) override;
		virtual void migrate(obs_data_t* data, uint64_t version) override;
		virtual void update(obs_data_t* settings) override;
		virtual void save(obs_data_t* settings) override;

		virtual gs_color_space video_get_color_space(size_t count, const gs_color_space* preferred_spaces) override;
		virtual void           video_tick(float time) override;
		virtual void           video_render(gs_effect_t* effect) override;

		void enum_active_sources(obs_source_enum_proc_t enum_callback, void* param) override;
		void enum_all_sources(obs_source_enum_proc_t enum_callback, void* param) override;

		void show() override;
		void hide() override;
		void activate() override;
		void deactivate() override;

		// Select/clear the mask input source by name.
		bool acquire(std::string_view name);
		void release();
	};

	class dynamic_mask_factory : public obs::source_factory<filter::dynamic_mask::dynamic_mask_factory, filter::dynamic_mask::dynamic_mask_instance> {
		// Owns label strings so OBS-held const char* pointers remain valid.
		std::list<std::string> _translation_cache;

		public:
		dynamic_mask_factory();
		virtual ~dynamic_mask_factory() override;

		virtual const char* get_name() override;

		virtual void get_defaults2(obs_data_t* data) override;

		virtual obs_properties_t* get_properties2(filter::dynamic_mask::dynamic_mask_instance* data) override;

		std::string translate_string(const char* format, ...);

		static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);

		public: // Singleton
		static void initialize();
		static void finalize();
		static std::shared_ptr<dynamic_mask_factory> instance();
	};
} // namespace streamfx::filter::dynamic_mask

View File

@ -0,0 +1,24 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
cmake_minimum_required(VERSION 3.26)
project("FFmpeg")
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

streamfx_add_component(${PROJECT_NAME})

# This component needs the FFmpeg libraries listed below.
find_package("FFmpeg"
	COMPONENTS "avutil" "avcodec" "swscale"
)

if(NOT FFmpeg_FOUND)
	# Disable the component gracefully instead of failing the configure step.
	streamfx_disable_component(${COMPONENT_TARGET} "FFmpeg is not available.")
	return()
else()
	target_link_libraries(${COMPONENT_TARGET}
		PUBLIC
			FFmpeg::avutil
			FFmpeg::avcodec
			FFmpeg::swscale
	)
endif()

View File

@ -0,0 +1,19 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "av1.hpp"
// Returns the translated display name for an AV1 profile, or the literal
// "Unknown" for any unrecognized value (including profile::UNKNOWN).
const char* streamfx::encoder::codec::av1::profile_to_string(profile p)
{
	if (p == profile::MAIN)
		return D_TRANSLATE(S_CODEC_AV1_PROFILE ".Main");
	if (p == profile::HIGH)
		return D_TRANSLATE(S_CODEC_AV1_PROFILE ".High");
	if (p == profile::PROFESSIONAL)
		return D_TRANSLATE(S_CODEC_AV1_PROFILE ".Professional");
	return "Unknown";
}

View File

@ -0,0 +1,21 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#define S_CODEC_AV1 "Codec.AV1"
#define S_CODEC_AV1_PROFILE "Codec.AV1.Profile"
#define S_CODEC_AV1_LEVEL "Codec.AV1.Level"
namespace streamfx::encoder::codec::av1 {
	// AV1 profile identifiers; values 0-2 appear to mirror the AV1
	// seq_profile numbering — confirm against the spec before relying on it.
	enum class profile {
		MAIN         = 0,
		HIGH         = 1,
		PROFESSIONAL = 2,
		UNKNOWN      = -1,
	};

	// Translated display name for a profile ("Unknown" for unrecognized values).
	const char* profile_to_string(profile p);
} // namespace streamfx::encoder::codec::av1

View File

@ -0,0 +1,6 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2022 Carsten Braun <info@braun-cloud.de>
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "dnxhr.hpp"

View File

@ -0,0 +1,13 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 Carsten Braun <info@braun-cloud.de>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"

// Codec: DNxHR
// Localization keys for the Avid DNxHR encoder front-end.
#define S_CODEC_DNXHR "Codec.DNxHR"
#define S_CODEC_DNXHR_PROFILE "Codec.DNxHR.Profile"

// Reserved for DNxHR-specific types; currently empty as the encoder drives
// everything through FFmpeg's own option list (see encoder-ffmpeg-dnxhd.cpp).
namespace streamfx::encoder::codec::dnxhr {} // namespace streamfx::encoder::codec::dnxhr

View File

@ -0,0 +1,70 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "h264.hpp"
/** Test whether 'ptr' points at an Annex-B start code.
 *
 * Recognizes both the 3-byte (00 00 01) and 4-byte (00 00 00 01) prefixes,
 * and additionally requires at least one byte beyond the NAL header to be
 * readable, exactly like the original bounds checks.
 *
 * \param ptr     Candidate position.
 * \param end_ptr End of the readable range (exclusive).
 * \param size    Receives the prefix length (3 or 4) on success.
 * \return Pointer to the NAL header byte (just past the prefix), or nullptr.
 */
uint8_t* is_nal_start(uint8_t* ptr, uint8_t* end_ptr, size_t& size)
{
	// Room for a 3-byte prefix, the NAL header, and one trailing byte.
	if ((end_ptr - ptr) <= 4)
		return nullptr;

	// Both prefix forms begin with two zero bytes.
	if ((ptr[0] != 0x0) || (ptr[1] != 0x0))
		return nullptr;

	// 3-byte prefix: 00 00 01.
	if (ptr[2] == 0x1) {
		size = 3;
		return ptr + 3;
	}

	// 4-byte prefix needs one more byte of room: 00 00 00 01.
	if ((end_ptr - ptr) <= 5)
		return nullptr;
	if ((ptr[2] == 0x0) && (ptr[3] == 0x01)) {
		size = 4;
		return ptr + 4;
	}

	return nullptr;
}
// Scan forward from 'ptr' and return the first NAL unit found, i.e. the byte
// just past its start code; 'size' receives the prefix length. Returns
// nullptr when no start code exists in [ptr, end_ptr).
uint8_t* streamfx::encoder::codec::h264::find_closest_nal(uint8_t* ptr, uint8_t* end_ptr, size_t& size)
{
	uint8_t* cursor = ptr;
	while (cursor < end_ptr) {
		uint8_t* payload = is_nal_start(cursor, end_ptr, size);
		if (payload != nullptr) {
			return payload;
		}
		++cursor;
	}
	return nullptr;
}
// Determine the reference priority of the first coded slice in an Annex-B
// packet by reading nal_ref_idc from its NAL unit header.
//
// \param ptr     Start of the packet.
// \param end_ptr End of the packet (exclusive).
// \return nal_ref_idc (0..3) of the first IDR/non-IDR coded slice found, or
//         UINT32_MAX when the packet contains no coded slice.
uint32_t streamfx::encoder::codec::h264::get_packet_reference_count(uint8_t* ptr, uint8_t* end_ptr)
{
	size_t   nal_ptr_prefix = 0;
	uint8_t* nal_ptr        = find_closest_nal(ptr, end_ptr, nal_ptr_prefix);
	while ((nal_ptr != nullptr) && (nal_ptr < end_ptr)) {
		// The NAL unit header byte is laid out as (ITU-T H.264, 7.3.1):
		//   forbidden_zero_bit (1) | nal_ref_idc (2) | nal_unit_type (5)
		// FIX: the previous masks (0x5 for the type, 0x2 for nal_ref_idc)
		// selected the wrong bits; they must be 0x1F and 0x3 respectively.
		switch (static_cast<nal_unit_type>((*nal_ptr) & 0x1F)) {
		case nal_unit_type::CODED_SLICE_NONIDR:
		case nal_unit_type::CODED_SLICE_IDR:
			return static_cast<uint32_t>((*nal_ptr >> 5) & 0x3);
		default:
			break;
		}

		// Advance to the next NAL unit. (The previous version also computed
		// the NAL's byte size here, but never used it.)
		size_t next_prefix = 0;
		nal_ptr            = find_closest_nal(nal_ptr, end_ptr, next_prefix);
	}
	return std::numeric_limits<uint32_t>::max();
}

View File

@ -0,0 +1,82 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
// Codec: H264
#define S_CODEC_H264 "Codec.H264"
#define S_CODEC_H264_PROFILE "Codec.H264.Profile"
#define S_CODEC_H264_LEVEL "Codec.H264.Level"
namespace streamfx::encoder::codec::h264 {
	// H.264 profiles, in increasing order of capability.
	enum class profile {
		CONSTRAINED_BASELINE,
		BASELINE,
		MAIN,
		HIGH,
		HIGH444_PREDICTIVE,
		UNKNOWN = -1,
	};

	// H.264 levels; values loosely follow level*10 (FFmpeg convention).
	// NOTE(review): inserting L1_0b after L1_0 shifts L1_1..L1_3 to the
	// values 12..14 instead of 11..13 — confirm consumers expect this.
	enum class level {
		L1_0 = 10,
		L1_0b,
		L1_1,
		L1_2,
		L1_3,
		L2_0 = 20,
		L2_1,
		L2_2,
		L3_0 = 30,
		L3_1,
		L3_2,
		L4_0 = 40,
		L4_1,
		L4_2,
		L5_0 = 50,
		L5_1,
		L5_2,
		L6_0 = 60,
		L6_1,
		L6_2,
		UNKNOWN = -1,
	};

	// NAL unit types, values per ITU-T H.264 Table 7-1.
	// See ITU-T H.264
	enum class nal_unit_type : uint8_t {
		UNSPECIFIED                          = 0,
		CODED_SLICE_NONIDR                   = 1,
		CODED_SLICE_DATA_PARTITION_A         = 2,
		CODED_SLICE_DATA_PARTITION_B         = 3,
		CODED_SLICE_DATA_PARTITION_C         = 4,
		CODED_SLICE_IDR                      = 5,
		SUPPLEMENTAL_ENHANCEMENT_INFORMATION = 6,
		SEQUENCE_PARAMETER_SET               = 7,
		PICTURE_PARAMETER_SET                = 8,
		ACCESS_UNIT_DELIMITER                = 9,
		END_OF_SEQUENCE                      = 10,
		END_OF_STREAM                        = 11,
		FILLER_DATA                          = 12,
		SEQUENCE_PARAMETER_SET_EXTENSION     = 13,
		PREFIX_NAL_UNIT                      = 14,
		SUBSET_SEQUENCE_PARAMETER_SET        = 15,
		DEPTH_PARAMETER_SET                  = 16,
		CODED_SLICE_AUXILIARY_PICTURE        = 19,
		CODED_SLICE_EXTENSION                = 20,
		CODED_SLICE_EXTENSION_DEPTH_VIEW     = 21,
	};

	/** Search for the closest NAL unit.
	 *
	 * \param ptr Beginning of the search range.
	 * \param endptr End of the search range (exclusive).
	 * \param size Receives the start-code prefix length (3 or 4 bytes) when a NAL is found.
	 *
	 * \return A valid pointer (to the byte just past the start code) if a NAL was found, otherwise \ref nullptr.
	 */
	uint8_t* find_closest_nal(uint8_t* ptr, uint8_t* endptr, size_t& size);

	// Returns the reference priority (nal_ref_idc) of the first coded slice
	// in the packet, or UINT32_MAX if no coded slice is present.
	uint32_t get_packet_reference_count(uint8_t* ptr, uint8_t* endptr);
} // namespace streamfx::encoder::codec::h264

View File

@ -0,0 +1,221 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "hevc.hpp"
using namespace streamfx::encoder::codec;
// HEVC NAL unit types; values per ITU-T H.265 Table 7-1. Types 0..31 are VCL
// (slice) NALs, 32..40 are parameter sets / delimiters / SEI, the rest are
// reserved or unspecified.
enum class nal_unit_type : uint8_t { // 6 bits
	TRAIL_N        = 0,
	TRAIL_R        = 1,
	TSA_N          = 2,
	TSA_R          = 3,
	STSA_N         = 4,
	STSA_R         = 5,
	RADL_N         = 6,
	RADL_R         = 7,
	RASL_N         = 8,
	RASL_R         = 9,
	RSV_VCL_N10    = 10,
	RSV_VCL_R11    = 11,
	RSV_VCL_N12    = 12,
	RSV_VCL_R13    = 13,
	RSV_VCL_N14    = 14,
	RSV_VCL_R15    = 15,
	BLA_W_LP       = 16,
	BLA_W_RADL     = 17,
	BLA_N_LP       = 18,
	IDR_W_RADL     = 19,
	IDR_N_LP       = 20,
	CRA            = 21,
	RSV_IRAP_VCL22 = 22,
	RSV_IRAP_VCL23 = 23,
	RSV_VCL24      = 24,
	RSV_VCL25      = 25,
	RSV_VCL26      = 26,
	RSV_VCL27      = 27,
	RSV_VCL28      = 28,
	RSV_VCL29      = 29,
	RSV_VCL30      = 30,
	RSV_VCL31      = 31,
	VPS            = 32,
	SPS            = 33,
	PPS            = 34,
	AUD            = 35,
	EOS            = 36,
	EOB            = 37,
	FD             = 38,
	PREFIX_SEI     = 39,
	SUFFIX_SEI     = 40,
	RSV_NVCL41     = 41,
	RSV_NVCL42     = 42,
	RSV_NVCL43     = 43,
	RSV_NVCL44     = 44,
	RSV_NVCL45     = 45,
	RSV_NVCL46     = 46,
	RSV_NVCL47     = 47,
	UNSPEC48       = 48,
	UNSPEC49       = 49,
	UNSPEC50       = 50,
	UNSPEC51       = 51,
	UNSPEC52       = 52,
	UNSPEC53       = 53,
	UNSPEC54       = 54,
	UNSPEC55       = 55,
	UNSPEC56       = 56,
	UNSPEC57       = 57,
	UNSPEC58       = 58,
	UNSPEC59       = 59,
	UNSPEC60       = 60,
	UNSPEC61       = 61,
	UNSPEC62       = 62,
	UNSPEC63       = 63,
};

// Overlay for the two-byte HEVC NAL unit header (ITU-T H.265 7.3.1.2).
// NOTE(review): bit-field layout is implementation-defined; this relies on
// the compiler packing the fields to match the bitstream order — confirm per
// target ABI.
struct hevc_nal_unit_header {
	bool          zero_bit : 1;
	nal_unit_type nut : 6;
	uint8_t       layer_id : 6;
	uint8_t       temporal_id_plus1 : 3;
};

// A parsed NAL unit: header overlay plus payload pointer/size (payload begins
// after the 2-byte header; see extract_header_sei).
struct hevc_nal {
	hevc_nal_unit_header* header;
	std::size_t           size = 0;
	uint8_t*              data = nullptr;
};
// True when 'data' points at a 4-byte Annex-B start code (00 00 00 01) with
// all four bytes readable. The 3-byte form is intentionally not recognized.
bool is_nal(uint8_t* data, uint8_t* end)
{
	if (static_cast<size_t>(end - data) < 4)
		return false;
	return (data[0] == 0x0) && (data[1] == 0x0) && (data[2] == 0x0) && (data[3] == 0x1);
}

// Advance 'data' (in place) to the first start code at or after its current
// position. On failure 'data' ends up just past 'end', mirroring the
// original loop's exit state. The scan includes 'end' itself; is_nal rejects
// positions with fewer than four readable bytes.
bool seek_to_nal(uint8_t*& data, uint8_t* end)
{
	if (data > end)
		return false;
	while (data <= end) {
		if (is_nal(data, end))
			return true;
		++data;
	}
	return false;
}

// Size of the NAL starting at 'data' (including its start code): the distance
// to the next start code, or to 'end' when this is the last NAL.
std::size_t get_nal_size(uint8_t* data, uint8_t* end)
{
	uint8_t* next = data + 4; // skip our own start code
	if (seek_to_nal(next, end)) {
		return static_cast<size_t>(next - data);
	}
	return static_cast<size_t>(end - data);
}
// Check whether 'data' points at an illegal byte pattern inside a NAL
// payload. Annex-B escapes any raw 00 00 00/01/02 sequence with an
// emulation-prevention byte (00 00 03), and that 03 must itself be followed
// by 0x00..0x03. Anything else marks the payload as broken.
//
// FIX: the 00-00-03 branch previously chained '!=' checks that could never
// all hold, so it unconditionally returned false — contradicting its own
// comment ("Discard marker only if the next byte is not 0x0, 0x1, 0x2 or
// 0x3") and letting malformed NALs through.
bool is_discard_marker(uint8_t* data, uint8_t* end)
{
	std::size_t s = static_cast<size_t>(end - data);
	if (s < 4)
		return false;

	if (*data != 0x0)
		return false;
	if (*(data + 1) != 0x0)
		return false;

	if (*(data + 2) == 0x3) {
		// Emulation-prevention byte: legal only when followed by 0x00..0x03.
		switch (*(data + 3)) {
		case 0x0:
		case 0x1:
		case 0x2:
		case 0x3:
			return false;
		default:
			return true;
		}
	} else {
		// A raw 00 00 00 / 00 00 01 / 00 00 02 inside a payload is illegal.
		return (*(data + 2) == 0x0) || (*(data + 2) == 0x1) || (*(data + 2) == 0x2);
	}
}

// Scan the whole range [data, end] for any discard marker; a NAL containing
// one should be dropped by the caller.
bool should_discard_nal(uint8_t* data, uint8_t* end)
{
	if (data > end)
		return true;

	for (; data <= end; data++) {
		if (is_discard_marker(data, end))
			return true;
	}

	return false;
}
// Loop-increment helper for extract_header_sei: advance 'ptr' past the
// current NAL ('sz' bytes) and recompute 'sz' as the size of the NAL that now
// starts at 'ptr' (0 only when ptr reached 'end').
void progress_parse(uint8_t*& ptr, uint8_t* end, size_t& sz)
{
	ptr += sz;
	sz = get_nal_size(ptr, end);
}
// Split an Annex-B HEVC packet: parameter-set NALs (VPS/SPS/PPS) are appended
// to 'header' and SEI NALs to 'sei', each copied verbatim including its
// 4-byte start code. All other NAL types are ignored.
//
// @param data    Start of the packet.
// @param sz_data Packet size in bytes.
// @param header  Receives VPS/SPS/PPS NAL units.
// @param sei     Receives prefix/suffix SEI NAL units.
void hevc::extract_header_sei(uint8_t* data, std::size_t sz_data, std::vector<uint8_t>& header, std::vector<uint8_t>& sei)
{
	uint8_t* ptr = data;
	uint8_t* end = data + sz_data;

	// Reserve enough memory to store the entire packet data if necessary.
	header.reserve(sz_data);
	sei.reserve(sz_data);

	// Find the first start code; a packet without one yields nothing.
	if (!seek_to_nal(ptr, end)) {
		return;
	}

	// Walk NAL by NAL; progress_parse advances 'ptr' by the previous NAL's
	// size and recomputes the size of the next one (0 terminates the loop).
	for (std::size_t nal_sz = get_nal_size(ptr, end); nal_sz > 0; progress_parse(ptr, end, nal_sz)) {
		// Drop NALs whose payload contains illegal byte patterns.
		if (should_discard_nal(ptr + 4, ptr + nal_sz)) {
			continue;
		}

		hevc_nal nal;
		// NAL header sits right after the 4-byte start code; the payload
		// begins after the 2-byte NAL unit header.
		nal.header = reinterpret_cast<hevc_nal_unit_header*>(ptr + 4);
		nal.size   = nal_sz - 4 - 2;
		nal.data   = ptr + 4 + 2;

		switch (nal.header->nut) {
		case nal_unit_type::VPS:
		case nal_unit_type::SPS:
		case nal_unit_type::PPS:
			// Copy the whole NAL, start code included.
			header.insert(header.end(), ptr, ptr + nal_sz);
			break;
		case nal_unit_type::PREFIX_SEI:
		case nal_unit_type::SUFFIX_SEI:
			sei.insert(sei.end(), ptr, ptr + nal_sz);
			break;
		default:
			break;
		}
	}
}

View File

@ -0,0 +1,46 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
// Codec: HEVC
#define S_CODEC_HEVC "Codec.HEVC"
#define S_CODEC_HEVC_PROFILE "Codec.HEVC.Profile"
#define S_CODEC_HEVC_TIER "Codec.HEVC.Tier"
#define S_CODEC_HEVC_LEVEL "Codec.HEVC.Level"
namespace streamfx::encoder::codec::hevc {
	// HEVC bitstream profiles exposed by the encoders.
	enum class profile {
		MAIN,
		MAIN10,
		RANGE_EXTENDED,
		UNKNOWN = -1,
	};

	// HEVC tiers (Main/High).
	enum class tier {
		MAIN,
		HIGH,
		UNKNOWN = -1,
	};

	// HEVC levels; numeric values equal 30 * level number, matching
	// general_level_idc in ITU-T H.265.
	enum class level {
		L1_0    = 30,
		L2_0    = 60,
		L2_1    = 63,
		L3_0    = 90,
		L3_1    = 93,
		L4_0    = 120,
		L4_1    = 123,
		L5_0    = 150,
		L5_1    = 153,
		L5_2    = 156,
		L6_0    = 180,
		L6_1    = 183,
		L6_2    = 186,
		UNKNOWN = -1,
	};

	// Splits an Annex-B packet: VPS/SPS/PPS NALs are appended to 'header',
	// prefix/suffix SEI NALs to 'sei' (each including its start code); all
	// other NAL types are ignored. See hevc.cpp.
	void extract_header_sei(uint8_t* data, std::size_t sz_data, std::vector<uint8_t>& header, std::vector<uint8_t>& sei);
} // namespace streamfx::encoder::codec::hevc

View File

@ -0,0 +1,5 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "prores.hpp"

View File

@ -0,0 +1,34 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
// Codec: ProRes
#define S_CODEC_PRORES "Codec.ProRes"
#define S_CODEC_PRORES_PROFILE "Codec.ProRes.Profile"
#define S_CODEC_PRORES_PROFILE_APCS "Codec.ProRes.Profile.APCS"
#define S_CODEC_PRORES_PROFILE_APCO "Codec.ProRes.Profile.APCO"
#define S_CODEC_PRORES_PROFILE_APCN "Codec.ProRes.Profile.APCN"
#define S_CODEC_PRORES_PROFILE_APCH "Codec.ProRes.Profile.APCH"
#define S_CODEC_PRORES_PROFILE_AP4H "Codec.ProRes.Profile.AP4H"
#define S_CODEC_PRORES_PROFILE_AP4X "Codec.ProRes.Profile.AP4X"
namespace streamfx::encoder::codec::prores {
	// ProRes profiles, named by their FourCC (apco, apcs, ...) with
	// descriptive aliases. NOTE(review): the numeric values are assumed to
	// match FFmpeg's ProRes "profile" option ordering — confirm against the
	// encoder in use.
	enum class profile : int32_t {
		APCO       = 0,
		Y422_PROXY = APCO, // ProRes 422 Proxy
		APCS       = 1,
		Y422_LT    = APCS, // ProRes 422 LT
		APCN       = 2,
		Y422       = APCN, // ProRes 422 (standard)
		APCH       = 3,
		Y422_HQ    = APCH, // ProRes 422 HQ
		AP4H       = 4,
		Y4444      = AP4H, // ProRes 4444
		AP4X       = 5,
		Y4444_XQ   = AP4X, // ProRes 4444 XQ
		_COUNT,            // Number of real profiles; not a valid profile.
	};
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,161 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "encoders/ffmpeg/handler.hpp"
#include "ffmpeg/avframe-queue.hpp"
#include "ffmpeg/hwapi/base.hpp"
#include "ffmpeg/swscale.hpp"
#include "obs/obs-encoder-factory.hpp"
#include "warning-disable.hpp"
#include <condition_variable>
#include <map>
#include <mutex>
#include <queue>
#include <stack>
#include <string>
#include <string_view>
#include <thread>
#include <unordered_map>
#include <vector>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
}
#include "warning-enable.hpp"
namespace streamfx::encoder::ffmpeg {
class ffmpeg_instance;
class ffmpeg_factory;
class ffmpeg_manager;
// One live FFmpeg encoding session bound to an OBS encoder instance.
class ffmpeg_instance : public obs::encoder_instance {
	ffmpeg_factory* _factory; // Factory that created this instance (non-owning).
	const AVCodec* _codec; // FFmpeg codec driving this session.
	AVCodecContext* _context; // Encoding context for _codec.
	streamfx::encoder::ffmpeg::handler* _handler; // Codec-specific behavior hooks.
	::streamfx::ffmpeg::swscale _scaler; // Software pixel format/scale converter.
	std::shared_ptr<AVPacket> _packet; // Reusable output packet.

	std::shared_ptr<::streamfx::ffmpeg::hwapi::base> _hwapi; // Hardware encode API wrapper, when hardware encoding.
	std::shared_ptr<::streamfx::ffmpeg::hwapi::instance> _hwinst; // Hardware device/session instance.

	std::size_t _lag_in_frames; // NOTE(review): presumably encoder look-ahead in frames — confirm in .cpp.
	std::size_t _sent_frames; // Frames submitted to the encoder so far.
	std::size_t _framerate_divisor; // Divisor applied to the OBS frame rate.

	// Extra Data
	bool _have_first_frame; // Set once the first frame/packet has been processed.
	std::vector<uint8_t> _extra_data; // Codec extradata returned via get_extra_data().
	std::vector<uint8_t> _sei_data; // SEI data returned via get_sei_data().

	// Frame Stack and Queue
	std::stack<std::shared_ptr<AVFrame>> _free_frames; // Recycled frames available for reuse.
	std::queue<std::shared_ptr<AVFrame>> _used_frames; // Frames waiting to be encoded.
	std::chrono::high_resolution_clock::time_point _free_frames_last_used; // Last time the free pool was touched.

	public:
	ffmpeg_instance(obs_data_t* settings, obs_encoder_t* self, bool is_hw);
	virtual ~ffmpeg_instance();

	public:
	void get_properties(obs_properties_t* props);

	void migrate(obs_data_t* settings, uint64_t version) override;

	bool update(obs_data_t* settings) override;

	// OBS encoder entry points for audio, software video, and texture video.
	bool encode_audio(struct encoder_frame* frame, struct encoder_packet* packet, bool* received_packet) override;

	bool encode_video(struct encoder_frame* frame, struct encoder_packet* packet, bool* received_packet) override;

	bool encode_video(uint32_t handle, int64_t pts, uint64_t lock_key, uint64_t* next_key, struct encoder_packet* packet, bool* received_packet) override;

	bool get_extra_data(uint8_t** extra_data, size_t* size) override;

	bool get_sei_data(uint8_t** sei_data, size_t* size) override;

	void get_video_info(struct video_scale_info* info) override;

	public:
	// Software vs. hardware initialization paths.
	void initialize_sw(obs_data_t* settings);
	void initialize_hw(obs_data_t* settings);

	// Frame pool management (free stack / used queue).
	void push_free_frame(std::shared_ptr<AVFrame> frame);
	std::shared_ptr<AVFrame> pop_free_frame();

	void push_used_frame(std::shared_ptr<AVFrame> frame);
	std::shared_ptr<AVFrame> pop_used_frame();

	// Thin wrappers around avcodec send/receive plus the combined helper.
	int receive_packet(bool* received_packet, struct encoder_packet* packet);

	int send_frame(std::shared_ptr<AVFrame> frame);

	bool encode_avframe(std::shared_ptr<AVFrame> frame, struct encoder_packet* packet, bool* received_packet);

	public: // Handler API
	bool is_hardware_encode();

	const AVCodec* get_avcodec();

	AVCodecContext* get_avcodeccontext();

	void log();

	// FFmpeg command-line style option helpers used by the custom settings UI.
	void generate_ffmpeg_commandline(std::unordered_map<std::string, std::string>& buffer, const AVClass* obj, void* data);

	void parse_ffmpeg_commandline(std::string_view text);
};
// Exposes a single FFmpeg encoder (AVCodec) as an OBS encoder factory.
class ffmpeg_factory : public obs::encoder_factory<ffmpeg_factory, ffmpeg_instance> {
	std::string _id; // OBS encoder identifier.
	std::string _codec; // Codec name reported to OBS.
	std::string _name; // Human-readable encoder name.

	const AVCodec* _avcodec; // The wrapped FFmpeg encoder.

	streamfx::encoder::ffmpeg::handler* _handler; // Codec-specific behavior hooks.

	public:
	ffmpeg_factory(ffmpeg_manager* manager, const AVCodec* codec);
	virtual ~ffmpeg_factory();

	const char* get_name() override;

	void get_defaults2(obs_data_t* data) override;

	void migrate(obs_data_t* data, uint64_t version) override;

	obs_properties_t* get_properties2(instance_t* data) override;

	// Property button callback. NOTE(review): presumably opens the handler's
	// help URL — confirm in the .cpp.
	static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);

	public:
	const AVCodec* get_avcodec();

	obs_encoder_info* get_info();
};
// Owns one ffmpeg_factory per available FFmpeg encoder and maps codec names
// to their codec-specific handlers.
class ffmpeg_manager {
	std::map<const AVCodec*, std::shared_ptr<ffmpeg_factory>> _factories; // One factory per registered AVCodec.

	public:
	ffmpeg_manager();
	~ffmpeg_manager();

	// Handler lookup by codec name; find may return null whereas get/has
	// follow the usual lookup conventions. NOTE(review): exact null/fallback
	// semantics live in the .cpp — confirm before relying on them.
	streamfx::encoder::ffmpeg::handler* find_handler(std::string_view codec);
	streamfx::encoder::ffmpeg::handler* get_handler(std::string_view codec);
	bool has_handler(std::string_view codec);

	public: // Singleton
	static std::shared_ptr<ffmpeg_manager> instance();
};
} // namespace streamfx::encoder::ffmpeg

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,221 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
#pragma once
#include "encoders/encoder-ffmpeg.hpp"
#include "encoders/ffmpeg/handler.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
extern "C" {
#include <libavcodec/avcodec.h>
}
#include "warning-enable.hpp"
/* Parameters by their codec specific name.
* '#' denotes a parameter specified via the context itself.
H.264 H.265 Options Done?
usage usage transcoding --
preset preset speed,balanced,quality Defines
profile profile <different> Defines
level level <different> Defines
tier main,high
rc rc cqp,cbr,vbr_peak,vbr_latency Defines
preanalysis preanalysis false,true Defines
vbaq vbaq false,true Defines
enforce_hrd enforce_hrd false,true Defines
filler_data filler_data false,true --
frame_skipping skip_frame false,true Defines
qp_i qp_i range(-1 - 51) Defines
qp_p qp_p range(-1 - 51) Defines
qp_b range(-1 - 51) Defines
#max_b_frames Defines
bf_delta_qp range(-10 - 10) --
bf_ref false,true Defines
bf_ref_delta_qp range(-10 - 10) --
me_half_pel me_half_pel false,true --
me_quarter_pel me_quarter_pel false,true --
aud aud false,true Defines
max_au_size max_au_size range(0 - Inf) --
#refs range(0 - 16?) Defines
#color_range AVCOL_RANGE_JPEG FFmpeg
#bit_rate Defines
#rc_max_rate Defines
#rc_buffer_size Defines
#rc_initial_buffer_occupancy --
#flags AV_CODEC_FLAG_LOOP_FILTER --
#gop_size FFmpeg
*/
// AMF H.264
// intra_refresh_mb: 0 - Inf
// header_spacing: -1 - 1000
// coder: auto, cavlc, cabac
// qmin, qmax (HEVC uses its own settings)
// AMF H.265
// header_insertion_mode: none, gop, idr
// gops_per_idr: 0 - Inf
// min_qp_i: -1 - 51
// max_qp_i: -1 - 51
// min_qp_p: -1 - 51
// max_qp_p: -1 - 51
namespace streamfx::encoder::ffmpeg {
namespace amf {
bool is_available();
void defaults(ffmpeg_factory* factory, obs_data_t* settings);
void properties_before(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props, AVCodecContext* context);
void properties_after(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props, AVCodecContext* context);
void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version);
void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);
void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);
class avc : public handler {
public:
avc();
public:
virtual ~avc();
public:
virtual std::string help(ffmpeg_factory* factory) override
{
return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-AMF";
};
public:
bool has_keyframes(ffmpeg_factory* factory) override;
public:
bool has_threading(ffmpeg_factory* factory) override;
public:
bool is_hardware(ffmpeg_factory* factory) override;
public:
bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;
public:
void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
public:
void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
public:
void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
public:
void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
public:
void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
public:
void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
public:
void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format) override;
};
// Handler for the AMD AMF H.265/HEVC encoder (hevc_amf).
class hevc : public handler {
public:
	hevc();
	virtual ~hevc();

	// Wiki page documenting the AMF encoder options.
	virtual std::string help(ffmpeg_factory* factory) override
	{
		return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-AMF";
	};

	bool has_keyframes(ffmpeg_factory* factory) override;
	bool has_threading(ffmpeg_factory* factory) override;
	bool is_hardware(ffmpeg_factory* factory) override;
	bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;
	void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
	void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
	void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
	void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
	void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
	void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
	void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format) override;
};
class av1 : public handler {
public:
av1();
public:
virtual ~av1();
public:
virtual std::string help(ffmpeg_factory* factory) override
{
return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-AMF";
};
public:
bool has_keyframes(ffmpeg_factory* factory) override;
public:
bool has_threading(ffmpeg_factory* factory) override;
public:
bool is_hardware(ffmpeg_factory* factory) override;
public:
bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;
public:
void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
public:
void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
public:
void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
public:
void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
public:
void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
public:
void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
public:
void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format) override;
};
} // namespace amf
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,89 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2020 Daniel Molkentin <daniel@molkentin.de>
#include "cfhd.hpp"
#include "common.hpp"
#include "encoders/encoder-ffmpeg.hpp"
#include "ffmpeg/tools.hpp"
#include "handler.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <map>
#include <string>
#include <utility>
#include <vector>
extern "C" {
#include <libavutil/opt.h>
}
#include "warning-enable.hpp"
using namespace streamfx::encoder::ffmpeg;
// Option-name triplet for the CineForm "quality" setting: FFmpeg's private
// option name, the OBS settings key, and the localization key.
struct strings {
	struct quality {
		static constexpr const char* ffmpeg = "quality";
		static constexpr const char* obs    = "Quality";
		static constexpr const char* i18n   = "Encoder.FFmpeg.CineForm.Quality";
	};
};

// Register this handler for FFmpeg's "cfhd" (GoPro CineForm) encoder.
cfhd::cfhd() : handler("cfhd") {}

// CineForm has no inter-frame coding settings here, so no keyframe interval
// is exposed.
bool cfhd::has_keyframes(ffmpeg_factory* factory)
{
	return false;
}
// Populate the encoder properties: a single "Quality" dropdown filled from
// the encoder's own AVOption list.
//
// @param factory  Factory describing the cfhd AVCodec.
// @param instance Live encoder instance, or nullptr when editing defaults.
// @param props    OBS property list to append to.
void cfhd::properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props)
{
	// Try and acquire a valid context.
	std::shared_ptr<AVCodecContext> ctx;
	if (instance) {
		// Borrow the live context; the no-op deleter leaves ownership with the instance.
		ctx = std::shared_ptr<AVCodecContext>(instance->get_avcodeccontext(), [](AVCodecContext*) {});
	} else { // If we don't have a context, create a temporary one that is automatically freed.
		ctx = std::shared_ptr<AVCodecContext>(avcodec_alloc_context3(factory->get_avcodec()), [](AVCodecContext* v) { avcodec_free_context(&v); });
	}
	// FIX: guard both acquisition paths. Previously only the temporary path
	// checked priv_data, and a failed avcodec_alloc_context3 would have been
	// dereferenced as a null pointer.
	if (!ctx || !ctx->priv_data) {
		return;
	}

	{ // Quality parameter
		// Build the localized label "<i18n key>.<option name>".
		auto to_string = [](const char* v) {
			char buffer[1024];
			snprintf(buffer, sizeof(buffer), "%s.%s", strings::quality::i18n, v);
			return D_TRANSLATE(buffer);
		};

		auto p = obs_properties_add_list(props, strings::quality::obs, D_TRANSLATE(strings::quality::i18n), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		streamfx::ffmpeg::tools::avoption_list_add_entries(ctx->priv_data, strings::quality::ffmpeg, [&p, &to_string](const AVOption* opt) {
			// FFmpeg returns this list in the wrong order. We want to start at the lowest, and go to the highest.
			// So simply always insert at the top, and this will reverse the list.
			obs_property_list_insert_string(p, 0, to_string(opt->name), opt->name);
		});
	}
}
// Wiki page documenting the CineForm encoder options.
std::string cfhd::help(ffmpeg_factory* factory)
{
	return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-GoPro-CineForm";
}

// Seed the settings with a sensible quality preset.
// NOTE(review): this uses obs_data_set_string (sets the value) rather than
// obs_data_set_default_string as other handlers do — confirm this is intended.
void cfhd::defaults(ffmpeg_factory* factory, obs_data_t* settings)
{
	obs_data_set_string(settings, strings::quality::obs, "film3+");
}

// No legacy settings to migrate for this handler.
void cfhd::migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) {}

// Forward the selected quality to the encoder's private "quality" option;
// empty/unset values are left at the encoder default.
void cfhd::update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings)
{
	if (const char* v = obs_data_get_string(settings, strings::quality::obs); v && (v[0] != '\0')) {
		av_opt_set(instance->get_avcodeccontext()->priv_data, strings::quality::ffmpeg, v, AV_OPT_SEARCH_CHILDREN);
	}
}

// Self-registering handler instance.
static cfhd handler = cfhd();

View File

@ -0,0 +1,26 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "handler.hpp"
namespace streamfx::encoder::ffmpeg {
	// Handler for FFmpeg's GoPro CineForm ("cfhd") encoder; exposes a single
	// quality option sourced from the encoder's AVOption list.
	class cfhd : public handler {
		public:
		cfhd();
		virtual ~cfhd(){};

		bool has_keyframes(ffmpeg_factory* factory) override;

		std::string help(ffmpeg_factory* factory) override;

		void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;

		void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;

		void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;

		void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
	};
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,164 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2020 Daniel Molkentin <daniel@molkentin.de>
// AUTOGENERATED COPYRIGHT HEADER END
#include "debug.hpp"
#include "common.hpp"
#include "../encoder-ffmpeg.hpp"
#include "handler.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <map>
#include <string>
#include <utility>
#include <vector>
extern "C" {
#include <libavutil/opt.h>
}
#include "warning-enable.hpp"
// Fallback formatter: types without a dedicated specialization produce an
// error marker instead of formatted output.
template<typename T>
std::string to_string(T value)
{
	return std::string("Error: to_string not implemented for this type!");
}

template<>
std::string to_string(int64_t value)
{
	// FIX: size the result from snprintf's return value. The previous
	// implementation returned all 32 buffer bytes, embedding the NUL
	// terminator and trailing garbage in every result.
	char buf[32];
	int  len = snprintf(buf, sizeof(buf), "%" PRId64, value);
	if (len < 0)
		return std::string();
	return std::string(buf, buf + len);
}

template<>
std::string to_string(uint64_t value)
{
	char buf[32];
	int  len = snprintf(buf, sizeof(buf), "%" PRIu64, value);
	if (len < 0)
		return std::string();
	return std::string(buf, buf + len);
}

template<>
std::string to_string(double_t value)
{
	// "%f" output can exceed any fixed buffer (e.g. 1e300 needs 300+ chars),
	// so measure first and allocate exactly.
	int len = snprintf(nullptr, 0, "%f", value);
	if (len < 0)
		return std::string();
	std::vector<char> buf(static_cast<size_t>(len) + 1);
	snprintf(buf.data(), buf.size(), "%f", value);
	return std::string(buf.data(), static_cast<size_t>(len));
}
using namespace streamfx::encoder::ffmpeg;
// Construct the debug handler; the empty codec name means it is not bound to
// a specific encoder id.
debug::debug() : handler("") {}

// Dump every codec-private AVOption of the instance's codec to the log. This
// is a developer diagnostic only — it never adds any OBS properties.
//
// @param instance Encoder instance whose codec is inspected.
// @param props    Unused.
void debug::properties(ffmpeg_instance* instance, obs_properties_t* props)
{
	const AVCodec* codec = instance->get_avcodec();

	// Only dump while no real encoding context exists yet.
	if (instance->get_avcodeccontext())
		return;

	// Temporary context, needed so the codec-private option table exists.
	// FIX: also guard against a failed allocation (previously ctx->priv_data
	// dereferenced a potentially-null pointer).
	AVCodecContext* ctx = avcodec_alloc_context3(codec);
	if (!ctx || !ctx->priv_data) {
		avcodec_free_context(&ctx);
		return;
	}

	DLOG_INFO("Options for '%s':", codec->name);

	// Human-readable names for each AVOptionType.
	std::pair<AVOptionType, std::string> opt_type_name[] = {
		{AV_OPT_TYPE_FLAGS, "Flags"},
		{AV_OPT_TYPE_INT, "Int"},
		{AV_OPT_TYPE_INT64, "Int64"},
		{AV_OPT_TYPE_DOUBLE, "Double"},
		{AV_OPT_TYPE_FLOAT, "Float"},
		{AV_OPT_TYPE_STRING, "String"},
		{AV_OPT_TYPE_RATIONAL, "Rational"},
		{AV_OPT_TYPE_BINARY, "Binary"},
		{AV_OPT_TYPE_DICT, "Dictionary"},
		{AV_OPT_TYPE_UINT64, "Unsigned Int64"},
		{AV_OPT_TYPE_CONST, "Constant"},
		{AV_OPT_TYPE_IMAGE_SIZE, "Image Size"},
		{AV_OPT_TYPE_PIXEL_FMT, "Pixel Format"},
		{AV_OPT_TYPE_SAMPLE_FMT, "Sample Format"},
		{AV_OPT_TYPE_VIDEO_RATE, "Video Rate"},
		{AV_OPT_TYPE_DURATION, "Duration"},
		{AV_OPT_TYPE_COLOR, "Color"},
		{AV_OPT_TYPE_CHANNEL_LAYOUT, "Layout"},
		{AV_OPT_TYPE_BOOL, "Bool"},
	};

	// Maps a unit name to the type of the option that declared it, so the
	// constants belonging to that unit can be formatted with the right type.
	// NOTE(review): entries are keyed by opt->name, not opt->unit — this only
	// matches when the declaring option and its unit share the same name,
	// which is common but not guaranteed in FFmpeg. Confirm.
	std::map<std::string, AVOptionType> unit_types;

	const AVOption* opt = nullptr;
	while ((opt = av_opt_next(ctx->priv_data, opt)) != nullptr) {
		std::string type_name = "";
		for (auto kv : opt_type_name) {
			if (opt->type == kv.first) {
				type_name = kv.second;
				break;
			}
		}

		if (opt->type == AV_OPT_TYPE_CONST) {
			if (opt->unit == nullptr) {
				DLOG_INFO("  Constant '%s' and help text '%s' with unknown settings.", opt->name, opt->help);
			} else {
				auto unit_type = unit_types.find(opt->unit);
				if (unit_type == unit_types.end()) {
					DLOG_INFO("  [%s] Flag '%s' and help text '%s' with value '%" PRId64 "'.", opt->unit, opt->name, opt->help, opt->default_val.i64);
				} else {
					std::string out;
					switch (unit_type->second) {
					case AV_OPT_TYPE_BOOL:
						out = opt->default_val.i64 ? "true" : "false";
						break;
					case AV_OPT_TYPE_INT:
						out = to_string(opt->default_val.i64);
						break;
					case AV_OPT_TYPE_UINT64:
					case AV_OPT_TYPE_FLAGS:
						out = to_string(static_cast<uint64_t>(opt->default_val.i64));
						break;
					case AV_OPT_TYPE_FLOAT:
					case AV_OPT_TYPE_DOUBLE:
						out = to_string(opt->default_val.dbl);
						break;
					case AV_OPT_TYPE_STRING:
						// FIX: guard a null default string (the non-constant
						// branch below already did).
						out = opt->default_val.str ? opt->default_val.str : "<invalid>";
						break;
					default:
						break;
					}
					DLOG_INFO("  [%s] Constant '%s' and help text '%s' with value '%s'.", opt->unit, opt->name, opt->help, out.c_str());
				}
			}
		} else {
			if (opt->unit != nullptr) {
				unit_types.emplace(opt->name, opt->type);
			}

			std::string minimum = "", maximum = "", out;
			minimum = to_string(opt->min);
			maximum = to_string(opt->max);
			{
				switch (opt->type) {
				case AV_OPT_TYPE_BOOL:
					out = opt->default_val.i64 ? "true" : "false";
					break;
				case AV_OPT_TYPE_INT:
					out = to_string(opt->default_val.i64);
					break;
				case AV_OPT_TYPE_UINT64:
				case AV_OPT_TYPE_FLAGS:
					out = to_string(static_cast<uint64_t>(opt->default_val.i64));
					break;
				case AV_OPT_TYPE_FLOAT:
				case AV_OPT_TYPE_DOUBLE:
					out = to_string(opt->default_val.dbl);
					break;
				case AV_OPT_TYPE_STRING:
					out = opt->default_val.str ? opt->default_val.str : "<invalid>";
					break;
				default:
					break;
				}
			}
			DLOG_INFO(
				"  Option '%s'%s%s%s with help '%s' of type '%s' with default value '%s', minimum '%s' and maximum "
				"'%s'.",
				opt->name, opt->unit ? " with unit (" : "", opt->unit ? opt->unit : "", opt->unit ? ")" : "", opt->help, type_name.c_str(), out.c_str(), minimum.c_str(), maximum.c_str());
		}
	}

	// FIX: the temporary context was previously leaked on every successful run.
	avcodec_free_context(&ctx);
}
static debug handler();

View File

@ -0,0 +1,16 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "handler.hpp"
namespace streamfx::encoder::ffmpeg {
	// Development-only handler that dumps a codec's private AVOptions to the
	// log instead of building a property UI.
	class debug : public handler {
		public:
		debug();
		virtual ~debug(){};

		// Logs all codec-private options of the instance's codec.
		// NOTE(review): this signature differs from the (factory, instance,
		// props) shape used by the other handlers and carries no 'override' —
		// confirm it actually overrides handler::properties.
		virtual void properties(ffmpeg_instance* instance, obs_properties_t* props);
	};
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,99 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2022 Carsten Braun <info@braun-cloud.de>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// Copyright (C) 2022-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "dnxhd.hpp"
#include "common.hpp"
#include "../codecs/dnxhr.hpp"
#include "ffmpeg/tools.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <array>
#include "warning-enable.hpp"
using namespace streamfx::encoder::ffmpeg;
using namespace streamfx::encoder::codec::dnxhr;
// Build the localization key "<S_CODEC_DNXHR_PROFILE>.<profile>" and return
// its translation.
// NOTE(review): the key is assembled in a stack buffer and handed to
// D_TRANSLATE — assumes D_TRANSLATE performs the lookup immediately and does
// not retain the pointer. Confirm.
inline const char* dnx_profile_to_display_name(const char* profile)
{
	char buffer[1024];
	snprintf(buffer, sizeof(buffer), "%s.%s", S_CODEC_DNXHR_PROFILE, profile);
	return D_TRANSLATE(buffer);
}

// Register this handler for FFmpeg's "dnxhd" encoder (which also provides
// the DNxHR profiles).
dnxhd::dnxhd() : handler("dnxhd") {}

dnxhd::~dnxhd() {}

void dnxhd::adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec)
{
	//Most people don't know what VC3 is and only know it as DNx.
	//Change name to make it easier to find.
	name = "Avid DNxHR (via FFmpeg)";
}

// DNxHR is intra-only, so there is no keyframe interval to expose.
bool dnxhd::has_keyframes(ffmpeg_factory* instance)
{
	return false;
}

// Default to the "standard quality" DNxHR profile.
void dnxhd::defaults(ffmpeg_factory* factory, obs_data_t* settings)
{
	obs_data_set_default_string(settings, S_CODEC_DNXHR_PROFILE, "dnxhr_sq");
}
// Populate the encoder properties: a profile dropdown sourced from the
// encoder's own AVOption list (DNxHR profiles only).
//
// @param factory  Factory describing the dnxhd AVCodec.
// @param instance Live encoder instance, or nullptr when editing defaults.
// @param props    OBS property list to append to.
void dnxhd::properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props)
{
	// Try and acquire a valid context.
	std::shared_ptr<AVCodecContext> ctx;
	if (instance) {
		// Borrow the live context; the no-op deleter leaves ownership with the instance.
		ctx = std::shared_ptr<AVCodecContext>(instance->get_avcodeccontext(), [](AVCodecContext*) {});
	} else { // If we don't have a context, create a temporary one that is automatically freed.
		ctx = std::shared_ptr<AVCodecContext>(avcodec_alloc_context3(factory->get_avcodec()), [](AVCodecContext* v) { avcodec_free_context(&v); });
	}
	// FIX: guard both acquisition paths. Previously only the temporary path
	// checked priv_data, and a failed avcodec_alloc_context3 would have been
	// dereferenced as a null pointer.
	if (!ctx || !ctx->priv_data) {
		return;
	}

	auto p = obs_properties_add_list(props, S_CODEC_DNXHR_PROFILE, D_TRANSLATE(S_CODEC_DNXHR_PROFILE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
	streamfx::ffmpeg::tools::avoption_list_add_entries(ctx->priv_data, "profile", [&p](const AVOption* opt) {
		if (strcmp(opt->name, "dnxhd") == 0) {
			//Do not show DNxHD profile as it is outdated and should not be used.
			//It's also very picky about framerate and framesize combos, which makes it even less useful
			return;
		}

		//ffmpeg returns the profiles for DNxHR from highest to lowest.
		//Lowest to highest is what people usually expect.
		//Therefore, new entries will always be inserted at the top, effectively reversing the list
		obs_property_list_insert_string(p, 0, dnx_profile_to_display_name(opt->name), opt->name);
	});
}
// Apply the user-selected DNxHR profile to the encoder's private options.
void dnxhd::update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings)
{
	const char* profile = obs_data_get_string(settings, S_CODEC_DNXHR_PROFILE);
	// AV_OPT_SEARCH_CHILDREN lets the option resolve into the codec's priv_data.
	av_opt_set(instance->get_avcodeccontext(), "profile", profile, AV_OPT_SEARCH_CHILDREN);
}
// Force the pixel format demanded by the chosen DNxHR profile.
void dnxhd::override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format)
{
	static const std::array<std::pair<const char*, AVPixelFormat>, static_cast<size_t>(5)> profile_to_format_map{std::pair{"dnxhr_lb", AV_PIX_FMT_YUV422P}, std::pair{"dnxhr_sq", AV_PIX_FMT_YUV422P}, std::pair{"dnxhr_hq", AV_PIX_FMT_YUV422P}, std::pair{"dnxhr_hqx", AV_PIX_FMT_YUV422P10}, std::pair{"dnxhr_444", AV_PIX_FMT_YUV444P10}};

	const char* selected_profile = obs_data_get_string(settings, S_CODEC_DNXHR_PROFILE);
	for (const auto& [profile_name, pixel_format] : profile_to_format_map) {
		if (strcmp(profile_name, selected_profile) == 0) {
			target_format = pixel_format;
			return;
		}
	}

	// Fallback for (yet) unknown profiles.
	target_format = AV_PIX_FMT_YUV422P;
}
// Self-registering global: the handler constructor inserts this object into
// handler::handlers() at load time.
static auto inst = dnxhd();

View File

@ -0,0 +1,36 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
// Copyright (C) 2022 Carsten Braun <info@braun-cloud.de>
#pragma once
#include "encoders/encoder-ffmpeg.hpp"
#include "encoders/ffmpeg/handler.hpp"
#include "warning-disable.hpp"
extern "C" {
#include <libavcodec/avcodec.h>
}
#include "warning-enable.hpp"
namespace streamfx::encoder::ffmpeg {
	// Handler for FFmpeg's "dnxhd" encoder, exposed to users as Avid DNxHR.
	// Overrides profile selection, defaults, and the pixel-format mapping.
	class dnxhd : public handler {
		public:
		dnxhd();
		virtual ~dnxhd();

		// DNxHR exposes no keyframe interval (see dnxhd.cpp: returns false).
		virtual bool has_keyframes(ffmpeg_factory* factory);

		// Renames the encoder entry to "Avid DNxHR (via FFmpeg)".
		virtual void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec);

		// Wiki page describing this encoder's options.
		virtual std::string help(ffmpeg_factory* factory) {
			return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-Avid-DNxHR";
		}

		virtual void defaults(ffmpeg_factory* factory, obs_data_t* settings);
		virtual void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		virtual void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);

		// Maps the selected profile to its required AVPixelFormat.
		virtual void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format);
	};
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,79 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "handler.hpp"
#include "../encoder-ffmpeg.hpp"
streamfx::encoder::ffmpeg::handler::handler_map_t& streamfx::encoder::ffmpeg::handler::handlers()
{
	// Function-local static: the registry is constructed on first use, which
	// keeps initialization order safe for handlers registered from globals.
	static handler_map_t registry;
	return registry;
}

streamfx::encoder::ffmpeg::handler::handler(std::string codec)
{
	// Self-register this handler instance under its codec name.
	handlers().emplace(codec, this);
}
// A codec supports keyframe intervals unless it is intra-only (every frame is
// independently decodable). Two detection paths depending on which FFmpeg
// feature macros exist at build time.
bool streamfx::encoder::ffmpeg::handler::has_keyframes(ffmpeg_factory* factory)
{
#if defined(AV_CODEC_PROP_INTRA_ONLY) // TODO: Determine if we need to check for an exact version.
	// Preferred: codec descriptor property.
	if (auto* desc = avcodec_descriptor_get(factory->get_avcodec()->id); desc) {
		return (desc->props & AV_CODEC_PROP_INTRA_ONLY) == 0;
	}
#endif
#ifdef AV_CODEC_CAP_INTRA_ONLY
	// Fallback: capability flag on the codec itself.
	return (factory->get_avcodec()->capabilities & AV_CODEC_CAP_INTRA_ONLY) == 0;
#else
	// Neither macro available: conservatively report no keyframe support.
	return false;
#endif
}
// True if the codec advertises any threading capability (frame, slice, or
// codec-managed threads; the macro name for the last differs across versions).
bool streamfx::encoder::ffmpeg::handler::has_threading(ffmpeg_factory* factory)
{
	return (factory->get_avcodec()->capabilities
			& (AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS
#if defined(AV_CODEC_CAP_OTHER_THREADS) // TODO: Determine if we need to check for an exact version.
			   | AV_CODEC_CAP_OTHER_THREADS
#else
			   | AV_CODEC_CAP_AUTO_THREADS
#endif
			   ));
}
// True if the codec declares itself hardware-backed.
bool streamfx::encoder::ffmpeg::handler::is_hardware(ffmpeg_factory* factory)
{
	return (factory->get_avcodec()->capabilities & AV_CODEC_CAP_HARDWARE) != 0;
}

// True if the codec supports parameter changes mid-stream. The out-parameters
// are left untouched, exactly as in the original implementation.
bool streamfx::encoder::ffmpeg::handler::is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes)
{
	return (factory->get_avcodec()->capabilities & AV_CODEC_CAP_PARAM_CHANGE) != 0;
}
// Default no-op implementations; concrete handlers override only what they need.

void streamfx::encoder::ffmpeg::handler::adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) {}

std::string streamfx::encoder::ffmpeg::handler::help(ffmpeg_factory* factory)
{
	// No documentation page by default.
	return "about:blank";
}

void streamfx::encoder::ffmpeg::handler::defaults(ffmpeg_factory* factory, obs_data_t* settings) {}

void streamfx::encoder::ffmpeg::handler::properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) {}

void streamfx::encoder::ffmpeg::handler::migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) {}

void streamfx::encoder::ffmpeg::handler::update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) {}

void streamfx::encoder::ffmpeg::handler::override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) {}

void streamfx::encoder::ffmpeg::handler::override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format) {}

View File

@ -0,0 +1,46 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "warning-disable.hpp"
#include <cstdint>
#include <map>
#include <string>
extern "C" {
#include <obs.h>
#include <libavcodec/avcodec.h>
}
#include "warning-enable.hpp"
namespace streamfx::encoder::ffmpeg {
	class ffmpeg_factory;
	class ffmpeg_instance;

	// Base class for per-codec customization of the generic FFmpeg encoder.
	// Constructing a handler registers it in handlers() under its codec name;
	// all virtuals have no-op/conservative defaults (see handler.cpp).
	struct handler {
		handler(std::string codec);
		virtual ~handler(){};

		// Capability queries, answered from the codec's descriptor/capabilities.
		virtual bool has_keyframes(ffmpeg_factory* factory);
		virtual bool has_threading(ffmpeg_factory* factory);
		virtual bool is_hardware(ffmpeg_factory* factory);
		virtual bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes);

		// Presentation: tweak id/name/codec shown to the user; help() returns a URL.
		virtual void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec);
		virtual std::string help(ffmpeg_factory* factory);

		// Settings lifecycle: defaults, UI, migration of old settings, apply.
		virtual void defaults(ffmpeg_factory* factory, obs_data_t* settings);
		virtual void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		virtual void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version);
		virtual void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);

		// Late overrides applied after the generic update/color setup.
		virtual void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);
		virtual void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format);

		public:
		// Registry of all handlers, keyed by codec name.
		typedef std::map<std::string, handler*> handler_map_t;

		static handler_map_t& handlers();
	};
} // namespace streamfx::encoder::ffmpeg

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,121 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
#pragma once
#include "encoders/encoder-ffmpeg.hpp"
#include "encoders/ffmpeg/handler.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
#include <string>
extern "C" {
#include <libavcodec/avcodec.h>
}
#include "warning-enable.hpp"
/* NVENC has multiple compression modes:
- CBR: Constant Bitrate (rc=cbr)
- VBR: Variable Bitrate (rc=vbr)
- CQP: Constant QP (rc=cqp)
- CQ: Constant Quality (rc=vbr b=0 maxrate=0 qmin=0 qmax=51 cq=qp), this is basically CRF in X264.
*/
namespace streamfx::encoder::ffmpeg {
	// Shared NVENC helpers used by the H.264/HEVC/AV1 handlers below.
	namespace nvenc {
		bool is_available();

		void defaults(ffmpeg_factory* factory, obs_data_t* settings);

		void properties_before(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props, AVCodecContext* context);
		// NOTE(review): "properies_after" is a typo for "properties_after", but the
		// name is part of the declared interface (definition lives elsewhere) —
		// renaming must be done in both places at once.
		void properies_after(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props, AVCodecContext* context);
		void properties_runtime(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);

		void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version);

		void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);
		void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);
	} // namespace nvenc

	// Handler for NVENC H.264 (h264_nvenc).
	class nvenc_h264 : public handler {
		public:
		nvenc_h264();
		virtual ~nvenc_h264();

		bool has_keyframes(ffmpeg_factory* factory) override;
		bool has_threading(ffmpeg_factory* factory) override;
		bool is_hardware(ffmpeg_factory* factory) override;
		bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;

		void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
		std::string help(ffmpeg_factory* factory) override
		{
			return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-NVENC";
		};

		void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
		void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
		void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
		void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
		void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;

		private:
		void properties_encoder(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		void properties_runtime(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
	};

	// Handler for NVENC HEVC (hevc_nvenc); mirrors nvenc_h264.
	class nvenc_hevc : public handler {
		public:
		nvenc_hevc();
		virtual ~nvenc_hevc();

		bool has_keyframes(ffmpeg_factory* factory) override;
		bool has_threading(ffmpeg_factory* factory) override;
		bool is_hardware(ffmpeg_factory* factory) override;
		bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;

		void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
		std::string help(ffmpeg_factory* factory) override
		{
			return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-NVENC";
		};

		void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
		void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
		void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
		void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
		void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;

		private:
		void properties_encoder(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		void properties_runtime(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
	};

	// Handler for NVENC AV1 (av1_nvenc); mirrors nvenc_h264.
	class nvenc_av1 : public handler {
		public:
		nvenc_av1();
		virtual ~nvenc_av1();

		bool has_keyframes(ffmpeg_factory* factory) override;
		bool has_threading(ffmpeg_factory* factory) override;
		bool is_hardware(ffmpeg_factory* factory) override;
		bool is_reconfigurable(ffmpeg_factory* factory, bool& threads, bool& gpu, bool& keyframes) override;

		void adjust_info(ffmpeg_factory* factory, std::string& id, std::string& name, std::string& codec) override;
		std::string help(ffmpeg_factory* factory) override
		{
			return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-NVENC";
		};

		void defaults(ffmpeg_factory* factory, obs_data_t* settings) override;
		void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props) override;
		void migrate(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, uint64_t version) override;
		void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;
		void override_update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings) override;

		private:
		void properties_encoder(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		void properties_runtime(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
	};
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,86 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "prores_aw.hpp"
#include "common.hpp"
#include "../codecs/prores.hpp"
#include "ffmpeg/tools.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <array>
#include "warning-enable.hpp"
using namespace streamfx::encoder::ffmpeg;
using namespace streamfx::encoder::codec::prores;
// Translate an FFmpeg AVProfile entry into a localized display name; unknown
// profiles fall back to FFmpeg's own profile name.
inline const char* profile_to_name(const AVProfile* ptr)
{
	switch (static_cast<profile>(ptr->profile)) {
	case profile::APCO:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_APCO);
	case profile::APCS:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_APCS);
	case profile::APCN:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_APCN);
	case profile::APCH:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_APCH);
	case profile::AP4H:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_AP4H);
	case profile::AP4X:
		return D_TRANSLATE(S_CODEC_PRORES_PROFILE_AP4X);
	default:
		return ptr->name;
	}
}
// Register this handler under FFmpeg's "prores_aw" encoder name.
prores_aw::prores_aw() : handler("prores_aw") {}

prores_aw::~prores_aw() {}

// This encoder exposes no keyframe-interval setting (unconditionally false).
bool prores_aw::has_keyframes(ffmpeg_factory* instance)
{
	return false;
}

// Default to profile id 0 (first entry in the codec's profile list).
void prores_aw::defaults(ffmpeg_factory* factory, obs_data_t* settings)
{
	obs_data_set_default_int(settings, S_CODEC_PRORES_PROFILE, 0);
}
// Build the profile selection UI. While an encoder instance exists the profile
// cannot change, so the control is shown disabled instead.
void prores_aw::properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props)
{
	if (!instance) {
		auto p = obs_properties_add_list(props, S_CODEC_PRORES_PROFILE, D_TRANSLATE(S_CODEC_PRORES_PROFILE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
		// Fix: guard against a null 'profiles' array before dereferencing —
		// FFmpeg permits codecs to expose no profile list at all.
		for (auto ptr = factory->get_avcodec()->profiles; ptr && ptr->profile != FF_PROFILE_UNKNOWN; ptr++) {
			obs_property_list_add_int(p, profile_to_name(ptr), static_cast<int64_t>(ptr->profile));
		}
	} else {
		obs_property_set_enabled(obs_properties_get(props, S_CODEC_PRORES_PROFILE), false);
	}
}
// Apply the selected profile to the live codec context (no-op without one).
void prores_aw::update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings)
{
	if (instance) {
		instance->get_avcodeccontext()->profile = static_cast<int>(obs_data_get_int(settings, S_CODEC_PRORES_PROFILE));
	}
}
// Force the pixel format demanded by the chosen ProRes profile; unknown
// profiles leave target_format unchanged.
void prores_aw::override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format)
{
	static const std::array<std::pair<profile, AVPixelFormat>, static_cast<size_t>(profile::_COUNT)> profile_to_format_map{
		std::pair{profile::APCO, AV_PIX_FMT_YUV422P10}, std::pair{profile::APCS, AV_PIX_FMT_YUV422P10}, std::pair{profile::APCN, AV_PIX_FMT_YUV422P10}, std::pair{profile::APCH, AV_PIX_FMT_YUV422P10}, std::pair{profile::AP4H, AV_PIX_FMT_YUV444P10}, std::pair{profile::AP4X, AV_PIX_FMT_YUV444P10},
	};

	const auto selected_profile = static_cast<profile>(obs_data_get_int(settings, S_CODEC_PRORES_PROFILE));
	for (const auto& [known_profile, pixel_format] : profile_to_format_map) {
		if (known_profile == selected_profile) {
			target_format = pixel_format;
			break;
		}
	}
}
// Self-registering global: the handler constructor inserts this object into
// handler::handlers() at load time.
static auto inst = prores_aw();

View File

@ -0,0 +1,33 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "encoders/encoder-ffmpeg.hpp"
#include "encoders/ffmpeg/handler.hpp"
#include "warning-disable.hpp"
extern "C" {
#include <libavcodec/avcodec.h>
}
#include "warning-enable.hpp"
namespace streamfx::encoder::ffmpeg {
	// Handler for FFmpeg's "prores_aw" (Apple ProRes) encoder: profile
	// selection plus the matching pixel-format override.
	class prores_aw : public handler {
		public:
		prores_aw();
		virtual ~prores_aw();

		// ProRes exposes no keyframe interval (see prores_aw.cpp: returns false).
		virtual bool has_keyframes(ffmpeg_factory* factory);

		// Wiki page describing this encoder's options.
		virtual std::string help(ffmpeg_factory* factory) {
			return "https://github.com/Xaymar/obs-StreamFX/wiki/Encoder-FFmpeg-Apple-ProRes";
		}

		virtual void defaults(ffmpeg_factory* factory, obs_data_t* settings);
		virtual void properties(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_properties_t* props);
		virtual void update(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings);

		// Maps the selected profile to its required AVPixelFormat.
		virtual void override_colorformat(ffmpeg_factory* factory, ffmpeg_instance* instance, obs_data_t* settings, AVPixelFormat& target_format);
	};
} // namespace streamfx::encoder::ffmpeg

View File

@ -0,0 +1,131 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "avframe-queue.hpp"
#include "tools.hpp"
using namespace streamfx::ffmpeg;
std::shared_ptr<AVFrame> avframe_queue::create_frame()
{
std::shared_ptr<AVFrame> frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* frame) {
av_frame_unref(frame);
av_frame_free(&frame);
});
frame->width = this->_resolution.first;
frame->height = this->_resolution.second;
frame->format = this->_format;
int res = av_frame_get_buffer(frame.get(), 32);
if (res < 0) {
throw std::runtime_error(tools::get_error_description(res));
}
return frame;
}
avframe_queue::avframe_queue() = default;

avframe_queue::~avframe_queue()
{
	// Drop all cached frames; each AVFrame is freed by its shared_ptr deleter.
	clear();
}
// Set the resolution used for newly created frames. Cached frames with a
// different size are discarded lazily by pop().
void avframe_queue::set_resolution(int32_t const width, int32_t const height)
{
	this->_resolution.first  = width;
	this->_resolution.second = height;
}

// Read back the configured resolution.
void avframe_queue::get_resolution(int32_t& width, int32_t& height)
{
	width  = this->_resolution.first;
	height = this->_resolution.second;
}

int32_t avframe_queue::get_width()
{
	return this->_resolution.first;
}

int32_t avframe_queue::get_height()
{
	return this->_resolution.second;
}

// Set the pixel format used for newly created frames.
void avframe_queue::set_pixel_format(AVPixelFormat const format)
{
	this->_format = format;
}

AVPixelFormat avframe_queue::get_pixel_format()
{
	return this->_format;
}
// Pre-allocate 'count' frames so later pops can reuse them without hitting
// the allocator on the hot path.
void avframe_queue::precache(std::size_t count)
{
	while (count-- > 0) {
		push(create_frame());
	}
}

// Discard all cached frames.
void avframe_queue::clear()
{
	std::lock_guard<std::mutex> guard(this->_lock);
	_frames.clear();
}

// Return a frame to the cache.
void avframe_queue::push(std::shared_ptr<AVFrame> const frame)
{
	std::lock_guard<std::mutex> guard(this->_lock);
	_frames.push_back(frame);
}
// Pop a frame, always returning one that matches the current
// resolution/format settings: stale cached frames are dropped and the loop
// retries, allocating fresh frames when the cache runs dry.
std::shared_ptr<AVFrame> avframe_queue::pop()
{
	std::unique_lock<std::mutex> ulock(this->_lock);
	std::shared_ptr<AVFrame> ret;
	while (ret == nullptr) {
		if (_frames.size() == 0) {
			// Cache empty: allocate a fresh frame.
			ret = create_frame();
		} else {
			ret = _frames.front();
			if (ret == nullptr) {
				// Defensive: null entry yields a fresh frame.
				// NOTE(review): the null entry stays at the deque's front here —
				// presumably unreachable since push() never stores null; confirm.
				ret = create_frame();
			} else {
				_frames.pop_front();
				if ((static_cast<int32_t>(ret->width) != this->_resolution.first) || (static_cast<int32_t>(ret->height) != this->_resolution.second) || (ret->format != this->_format)) {
					// Frame predates a settings change: discard and retry.
					ret = nullptr;
				}
			}
		}
	}
	return ret;
}
// Pop the front frame if one exists; never allocates. Returns null when the
// cache is empty (or its front entry is null).
std::shared_ptr<AVFrame> avframe_queue::pop_only()
{
	std::unique_lock<std::mutex> guard(this->_lock);
	if (_frames.empty()) {
		return nullptr;
	}
	auto front = _frames.front();
	if (!front) {
		return nullptr;
	}
	_frames.pop_front();
	return front;
}
bool avframe_queue::empty()
{
return _frames.empty();
}
std::size_t avframe_queue::size()
{
return _frames.size();
}

View File

@ -0,0 +1,55 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "warning-disable.hpp"
#include <deque>
#include <mutex>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavutil/frame.h>
#include "warning-enable.hpp"
}
namespace streamfx::ffmpeg {
	// Thread-safe cache of pre-allocated AVFrames sharing one resolution and
	// pixel format. pop() transparently replaces frames that no longer match
	// the configured settings.
	class avframe_queue {
		std::deque<std::shared_ptr<AVFrame>> _frames;
		std::mutex                           _lock;       // guards _frames
		std::pair<int32_t, int32_t>          _resolution; // width, height for new frames
		AVPixelFormat                        _format = AV_PIX_FMT_NONE;

		// Allocate one frame with buffers matching _resolution/_format.
		std::shared_ptr<AVFrame> create_frame();

		public:
		avframe_queue();
		~avframe_queue();

		// Frame geometry/format configuration (applies to frames created afterwards).
		void set_resolution(int32_t width, int32_t height);
		void get_resolution(int32_t& width, int32_t& height);
		int32_t get_width();
		int32_t get_height();
		void set_pixel_format(AVPixelFormat format);
		AVPixelFormat get_pixel_format();

		// Pre-allocate 'count' frames.
		void precache(std::size_t count);

		void clear();

		void push(std::shared_ptr<AVFrame> frame);
		// pop() allocates when empty; pop_only() returns null instead.
		std::shared_ptr<AVFrame> pop();
		std::shared_ptr<AVFrame> pop_only();

		bool empty();
		std::size_t size();
	};
} // namespace streamfx::ffmpeg

View File

@ -0,0 +1,5 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#include "base.hpp"

View File

@ -0,0 +1,50 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "warning-disable.hpp"
#include <list>
#include <utility>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
#include "warning-enable.hpp"
}
namespace streamfx::ffmpeg::hwapi {
	// Identifies a GPU adapter: 'id' is an API-specific 64/64-bit pair
	// (for D3D11, the adapter LUID high/low parts) plus a display name.
	struct device {
		std::pair<int64_t, int64_t> id;
		std::string                 name;
	};

	// One opened hardware device: creates FFmpeg device contexts and moves
	// OBS textures into AVFrames.
	class instance {
		public:
		virtual ~instance(){};

		// Returns a new AVHWDeviceContext reference; caller owns the AVBufferRef.
		virtual AVBufferRef* create_device_context() = 0;

		virtual std::shared_ptr<AVFrame> allocate_frame(AVBufferRef* frames) = 0;

		// Copy a shared OBS texture (by handle + keyed-mutex keys) into 'frame'.
		virtual void copy_from_obs(AVBufferRef* frames, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key, std::shared_ptr<AVFrame> frame) = 0;

		// Convenience: allocate_frame() followed by copy_from_obs().
		virtual std::shared_ptr<AVFrame> avframe_from_obs(AVBufferRef* frames, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key) = 0;
	};

	// Factory for instances of one hardware API (e.g. D3D11).
	class base {
		public:
		virtual ~base(){};

		virtual std::list<hwapi::device> enumerate_adapters() = 0;

		virtual std::shared_ptr<hwapi::instance> create(const hwapi::device& target) = 0;

		// Create an instance bound to the device OBS itself renders with.
		virtual std::shared_ptr<hwapi::instance> create_from_obs() = 0;
	};
} // namespace streamfx::ffmpeg::hwapi

View File

@ -0,0 +1,235 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#ifdef WIN32
#include "d3d11.hpp"
#include "obs/gs/gs-helper.hpp"
#include "warning-disable.hpp"
#include <sstream>
#include <vector>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavutil/hwcontext_d3d11va.h>
#include "warning-enable.hpp"
}
using namespace streamfx::ffmpeg::hwapi;
// Load DXGI/D3D11 dynamically and create the IDXGIFactory1 used for adapter
// enumeration. Throws std::runtime_error when any prerequisite is missing.
d3d11::d3d11() : _dxgi_module(0), _d3d11_module(0)
{
	_dxgi_module = LoadLibraryW(L"dxgi.dll");
	if (!_dxgi_module)
		throw std::runtime_error("Unable to load DXGI");

	_d3d11_module = LoadLibraryW(L"d3d11.dll");
	if (!_d3d11_module)
		throw std::runtime_error("Unable to load D3D11");

#pragma warning(push)
#pragma warning(disable : 4191)
	_CreateDXGIFactory  = reinterpret_cast<CreateDXGIFactory_t>(GetProcAddress(_dxgi_module, "CreateDXGIFactory"));
	_CreateDXGIFactory1 = reinterpret_cast<CreateDXGIFactory1_t>(GetProcAddress(_dxgi_module, "CreateDXGIFactory1"));
	_D3D11CreateDevice  = reinterpret_cast<D3D11CreateDevice_t>(GetProcAddress(_d3d11_module, "D3D11CreateDevice"));
#pragma warning(pop)

	// Fix: everything below (and EnumAdapters1 elsewhere) requires the DXGI 1.1
	// factory. The original accepted CreateDXGIFactory alone and then invoked
	// the null _CreateDXGIFactory1 pointer.
	if (!_CreateDXGIFactory1)
		throw std::runtime_error("DXGI not supported");
	if (!_D3D11CreateDevice)
		throw std::runtime_error("D3D11 not supported");

	HRESULT hr = _CreateDXGIFactory1(__uuidof(IDXGIFactory1), (void**)&_dxgifactory);
	if (FAILED(hr)) {
		std::stringstream sstr;
		sstr << "Failed to create DXGI Factory (" << hr << ")";
		throw std::runtime_error(sstr.str());
	}
}
d3d11::~d3d11()
{
	// Fix: release the factory before unloading its module. Member destructors
	// run *after* this body, so the CComPtr would otherwise call Release()
	// into an already-unloaded dxgi.dll. CComPtr::Release() nulls the pointer,
	// making the later automatic destruction a no-op.
	_dxgifactory.Release();
	FreeLibrary(_dxgi_module);
	FreeLibrary(_d3d11_module);
}
// Enumerate all DXGI adapters as hwapi::device entries (display name plus
// the adapter LUID split into high/low parts).
std::list<device> d3d11::enumerate_adapters()
{
	std::list<device> adapters;

	// Enumerate Adapters
	IDXGIAdapter1* dxgi_adapter = nullptr;
	for (UINT idx = 0; !FAILED(_dxgifactory->EnumAdapters1(idx, &dxgi_adapter)); idx++) {
		DXGI_ADAPTER_DESC1 desc = DXGI_ADAPTER_DESC1();
		dxgi_adapter->GetDesc1(&desc);
		// Fix: EnumAdapters1 hands out a referenced COM interface; release it
		// once described, otherwise every enumeration leaks adapter objects.
		dxgi_adapter->Release();

		std::vector<char> buf(1024);
		std::size_t       len = static_cast<size_t>(snprintf(buf.data(), buf.size(), "%ls (VEN_%04x/DEV_%04x/SUB_%04x/REV_%04x)", desc.Description, desc.VendorId, desc.DeviceId, desc.SubSysId, desc.Revision));

		device dev;
		dev.name      = std::string(buf.data(), buf.data() + len);
		dev.id.first  = desc.AdapterLuid.HighPart;
		dev.id.second = desc.AdapterLuid.LowPart;
		adapters.push_back(dev);
	}

	return adapters;
}
std::shared_ptr<instance> d3d11::create(const device& target)
{
std::shared_ptr<d3d11_instance> inst;
ATL::CComPtr<ID3D11Device> device;
IDXGIAdapter1* adapter = nullptr;
// Find the correct "Adapter" (device).
IDXGIAdapter1* dxgi_adapter = nullptr;
for (UINT idx = 0; !FAILED(_dxgifactory->EnumAdapters1(idx, &dxgi_adapter)); idx++) {
DXGI_ADAPTER_DESC1 desc = DXGI_ADAPTER_DESC1();
dxgi_adapter->GetDesc1(&desc);
if ((desc.AdapterLuid.LowPart == target.id.second) && (desc.AdapterLuid.HighPart == target.id.first)) {
adapter = dxgi_adapter;
break;
}
}
// Create a D3D11 Device
UINT device_flags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
std::vector<D3D_FEATURE_LEVEL> feature_levels = {D3D_FEATURE_LEVEL_12_1, D3D_FEATURE_LEVEL_12_0, D3D_FEATURE_LEVEL_11_1};
if (FAILED(_D3D11CreateDevice(adapter, D3D_DRIVER_TYPE_HARDWARE, NULL, device_flags, feature_levels.data(), static_cast<UINT>(feature_levels.size()), D3D11_SDK_VERSION, &device, NULL, NULL))) {
throw std::runtime_error("Failed to create D3D11 device for target.");
}
return std::make_shared<d3d11_instance>(device);
}
// Wrap the D3D11 device OBS itself renders with in a hwapi instance.
std::shared_ptr<instance> d3d11::create_from_obs()
{
	// Hold the OBS graphics context while touching the renderer's device.
	auto gctx = streamfx::obs::gs::context();

	if (GS_DEVICE_DIRECT3D_11 != gs_get_device_type()) {
		throw std::runtime_error("OBS Device is not a D3D11 Device.");
	}

	// CComPtr's constructor AddRef()s, balancing OBS's continued ownership.
	ATL::CComPtr<ID3D11Device> device = ATL::CComPtr<ID3D11Device>(reinterpret_cast<ID3D11Device*>(gs_get_device_obj()));
	return std::make_shared<d3d11_instance>(device);
}
// Holds a reference to the D3D11 texture backing an AVFrame.
// NOTE(review): appears unused in this translation unit — confirm before removal.
struct D3D11AVFrame {
	ATL::CComPtr<ID3D11Texture2D> handle;
};

d3d11_instance::d3d11_instance(ATL::CComPtr<ID3D11Device> device) : _device(device)
{
	// Acquire immediate rendering context.
	device->GetImmediateContext(&_context);
}

d3d11_instance::~d3d11_instance()
{
	//_context.Release(); // Automatically performed by ATL::CComPtr.
}
// Wrap our D3D11 device in an FFmpeg AVHWDeviceContext (D3D11VA). The caller
// owns the returned AVBufferRef. Throws std::runtime_error on failure.
AVBufferRef* d3d11_instance::create_device_context()
{
	AVBufferRef* dctx_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_D3D11VA);
	if (!dctx_ref)
		throw std::runtime_error("Failed to allocate AVHWDeviceContext.");

	AVHWDeviceContext*      hwdev        = reinterpret_cast<AVHWDeviceContext*>(dctx_ref->data);
	AVD3D11VADeviceContext* device_hwctx = reinterpret_cast<AVD3D11VADeviceContext*>(hwdev->hwctx);

	// Provide the base device information only.
	device_hwctx->device = _device;
	device_hwctx->device->AddRef();

	// And a way to lock/unlock the device.
	device_hwctx->lock   = [](void*) { obs_enter_graphics(); };
	device_hwctx->unlock = [](void*) { obs_leave_graphics(); };

	// Then let FFmpeg do the rest for us.
	int ret = av_hwdevice_ctx_init(dctx_ref);
	if (ret < 0) {
		// Fix: free the context on failure instead of leaking it; unref invokes
		// the context's free callback, which releases the device AddRef above.
		av_buffer_unref(&dctx_ref);
		throw std::runtime_error("Failed to initialize AVHWDeviceContext.");
	}

	return dctx_ref;
}
// Allocate one GPU AVFrame from the given AVHWFramesContext reference.
std::shared_ptr<AVFrame> d3d11_instance::allocate_frame(AVBufferRef* frames)
{
	auto gctx = streamfx::obs::gs::context();

	// Allocate a frame. On failure below, the shared_ptr deleter frees it.
	auto frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* frame) { av_frame_free(&frame); });

	// Create the necessary buffers.
	if (av_hwframe_get_buffer(frames, frame.get(), 0) < 0) {
		throw std::runtime_error("Failed to create AVFrame.");
	}

	// Try to prevent this resource from ever leaving the GPU unless absolutely necessary.
	// (For D3D11VA frames, data[0] is the backing ID3D11Texture2D.)
	reinterpret_cast<ID3D11Texture2D*>(frame->data[0])->SetEvictionPriority(DXGI_RESOURCE_PRIORITY_MAXIMUM);

	return frame;
}
// Copy a shared OBS texture (identified by 'handle', guarded by a DXGI keyed
// mutex) into the GPU-backed 'frame'. Throws std::runtime_error on failure.
// NOTE(review): if an exception fires between AcquireSync and ReleaseSync the
// keyed mutex stays acquired — consider an RAII guard; confirm caller behavior.
void d3d11_instance::copy_from_obs(AVBufferRef*, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key, std::shared_ptr<AVFrame> frame)
{
	auto gctx = streamfx::obs::gs::context();

	// Attempt to acquire shared texture.
	ATL::CComPtr<ID3D11Texture2D> input;
	if (FAILED(_device->OpenSharedResource(reinterpret_cast<HANDLE>(static_cast<uintptr_t>(handle)), __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&input)))) {
		throw std::runtime_error("Failed to open shared texture resource.");
	}

	// Attempt to acquire texture mutex.
	ATL::CComPtr<IDXGIKeyedMutex> mutex;
	if (FAILED(input->QueryInterface(__uuidof(IDXGIKeyedMutex), reinterpret_cast<void**>(&mutex)))) {
		throw std::runtime_error("Failed to retrieve mutex for texture resource.");
	}

	// Attempt to acquire texture lock (1 second timeout).
	if (FAILED(mutex->AcquireSync(lock_key, 1000))) {
		throw std::runtime_error("Failed to acquire lock on input texture.");
	}

	// Set some parameters on the input texture, and get its description.
	UINT evict = input->GetEvictionPriority();
	input->SetEvictionPriority(DXGI_RESOURCE_PRIORITY_MAXIMUM);

	// Clone the content of the input texture into the frame's texture.
	_context->CopyResource(reinterpret_cast<ID3D11Texture2D*>(frame->data[0]), input);

	// Restore original parameters on input.
	input->SetEvictionPriority(evict);

	// Release the acquired lock.
	if (FAILED(mutex->ReleaseSync(lock_key))) {
		throw std::runtime_error("Failed to release lock on input texture.");
	}

	// Release the lock on the next texture.
	// TODO: Determine if this is necessary.
	mutex->ReleaseSync(*next_lock_key);
}
// Convenience wrapper: allocate a GPU frame, then blit the shared OBS texture
// into it.
std::shared_ptr<AVFrame> d3d11_instance::avframe_from_obs(AVBufferRef* frames, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key)
{
	auto gctx = streamfx::obs::gs::context();

	std::shared_ptr<AVFrame> result = allocate_frame(frames);
	copy_from_obs(frames, handle, lock_key, next_lock_key, result);
	return result;
}
#endif

View File

@ -0,0 +1,58 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "base.hpp"
#include "warning-disable.hpp"
#include <atlutil.h>
#include <d3d11.h>
#include <d3d11_1.h>
#include <dxgi.h>
#include "warning-enable.hpp"
namespace streamfx::ffmpeg::hwapi {
class d3d11 : public streamfx::ffmpeg::hwapi::base {
typedef HRESULT(__stdcall* CreateDXGIFactory_t)(REFIID, void**);
typedef HRESULT(__stdcall* CreateDXGIFactory1_t)(REFIID, void**);
typedef HRESULT(__stdcall* D3D11CreateDevice_t)(IDXGIAdapter*, D3D_DRIVER_TYPE, HMODULE, UINT, CONST D3D_FEATURE_LEVEL*, UINT, UINT, ID3D11Device**, D3D_FEATURE_LEVEL*, ID3D11DeviceContext**);
HMODULE _dxgi_module;
CreateDXGIFactory_t _CreateDXGIFactory;
CreateDXGIFactory1_t _CreateDXGIFactory1;
HMODULE _d3d11_module;
D3D11CreateDevice_t _D3D11CreateDevice;
ATL::CComPtr<IDXGIFactory1> _dxgifactory;
public:
d3d11();
virtual ~d3d11();
virtual std::list<hwapi::device> enumerate_adapters() override;
virtual std::shared_ptr<hwapi::instance> create(const hwapi::device& target) override;
virtual std::shared_ptr<hwapi::instance> create_from_obs() override;
};
// A single D3D11 device/context pair used to exchange textures with FFmpeg.
class d3d11_instance : public streamfx::ffmpeg::hwapi::instance {
ATL::CComPtr<ID3D11Device> _device;
ATL::CComPtr<ID3D11DeviceContext> _context;
public:
d3d11_instance(ATL::CComPtr<ID3D11Device> device);
virtual ~d3d11_instance();
// Wraps the device in an FFmpeg hardware device context buffer.
virtual AVBufferRef* create_device_context() override;
// Allocates a hardware AVFrame out of the given frames pool.
virtual std::shared_ptr<AVFrame> allocate_frame(AVBufferRef* frames) override;
// Copies a shared OBS texture (by shared handle) into 'frame'; lock_key /
// next_lock_key drive the keyed-mutex synchronization with OBS.
virtual void copy_from_obs(AVBufferRef* frames, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key, std::shared_ptr<AVFrame> frame) override;
// allocate_frame() followed by copy_from_obs() in one call.
virtual std::shared_ptr<AVFrame> avframe_from_obs(AVBufferRef* frames, uint32_t handle, uint64_t lock_key, uint64_t* next_lock_key) override;
};
} // namespace streamfx::ffmpeg::hwapi

View File

@ -0,0 +1,186 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "swscale.hpp"
#include "warning-disable.hpp"
#include <stdexcept>
#include "warning-enable.hpp"
using namespace streamfx::ffmpeg;
// Members are initialized in-class; the SwsContext is created lazily by initialize().
swscale::swscale() = default;
// Release the SwsContext (if any) on destruction.
swscale::~swscale()
{
finalize();
}
// Set the source picture dimensions in pixels.
void swscale::set_source_size(uint32_t width, uint32_t height)
{
source_size.first = width;
source_size.second = height;
}
// Retrieve the source picture dimensions via out-parameters.
void swscale::get_source_size(uint32_t& width, uint32_t& height)
{
width = this->source_size.first;
height = this->source_size.second;
}
// Retrieve the source picture dimensions as a (width, height) pair.
std::pair<uint32_t, uint32_t> swscale::get_source_size()
{
return this->source_size;
}
// Source width in pixels.
uint32_t swscale::get_source_width()
{
return this->source_size.first;
}
// Source height in pixels.
uint32_t swscale::get_source_height()
{
return this->source_size.second;
}
// Set the source pixel format.
void swscale::set_source_format(AVPixelFormat format)
{
source_format = format;
}
// Currently configured source pixel format (AV_PIX_FMT_NONE if unset).
AVPixelFormat swscale::get_source_format()
{
return this->source_format;
}
// Set source range and colorspace in one call.
void swscale::set_source_color(bool full_range, AVColorSpace space)
{
source_full_range = full_range;
source_colorspace = space;
}
// Set the source colorspace only.
void swscale::set_source_colorspace(AVColorSpace space)
{
this->source_colorspace = space;
}
// Currently configured source colorspace (AVCOL_SPC_UNSPECIFIED if unset).
AVColorSpace swscale::get_source_colorspace()
{
return this->source_colorspace;
}
// Set whether the source uses full (JPEG) range rather than limited (MPEG) range.
void swscale::set_source_full_range(bool full_range)
{
this->source_full_range = full_range;
}
// True when the source is configured as full range.
bool swscale::is_source_full_range()
{
return this->source_full_range;
}
// Set the target picture dimensions in pixels.
void swscale::set_target_size(uint32_t width, uint32_t height)
{
target_size.first = width;
target_size.second = height;
}
// Retrieve the target picture dimensions via out-parameters.
void swscale::get_target_size(uint32_t& width, uint32_t& height)
{
width = target_size.first;
height = target_size.second;
}
// Retrieve the target picture dimensions as a (width, height) pair.
std::pair<uint32_t, uint32_t> swscale::get_target_size()
{
return this->target_size;
}
// Target width in pixels.
uint32_t swscale::get_target_width()
{
return this->target_size.first;
}
// Target height in pixels.
uint32_t swscale::get_target_height()
{
return this->target_size.second;
}
// Set the target pixel format.
void swscale::set_target_format(AVPixelFormat format)
{
target_format = format;
}
// Currently configured target pixel format (AV_PIX_FMT_NONE if unset).
AVPixelFormat swscale::get_target_format()
{
return this->target_format;
}
// Set target range and colorspace in one call.
void swscale::set_target_color(bool full_range, AVColorSpace space)
{
target_full_range = full_range;
target_colorspace = space;
}
// Set the target colorspace only.
void swscale::set_target_colorspace(AVColorSpace space)
{
this->target_colorspace = space;
}
// Currently configured target colorspace (AVCOL_SPC_UNSPECIFIED if unset).
AVColorSpace swscale::get_target_colorspace()
{
return this->target_colorspace;
}
// Set whether the target uses full (JPEG) range rather than limited (MPEG) range.
void swscale::set_target_full_range(bool full_range)
{
this->target_full_range = full_range;
}
// True when the target is configured as full range.
bool swscale::is_target_full_range()
{
return this->target_full_range;
}
// Create the SwsContext from the previously configured source/target
// parameters. 'flags' are SWS_* scaler flags. Returns false when a context
// already exists or creation failed; throws std::invalid_argument when
// mandatory parameters have not been set.
bool swscale::initialize(int flags)
{
// Already initialized; caller must finalize() first.
if (this->context) {
return false;
}
// All source parameters must be configured before initialization.
if (source_size.first == 0 || source_size.second == 0 || source_format == AV_PIX_FMT_NONE || source_colorspace == AVCOL_SPC_UNSPECIFIED) {
throw std::invalid_argument("not all source parameters were set");
}
// All target parameters must be configured before initialization.
if (target_size.first == 0 || target_size.second == 0 || target_format == AV_PIX_FMT_NONE || target_colorspace == AVCOL_SPC_UNSPECIFIED) {
throw std::invalid_argument("not all target parameters were set");
}
this->context = sws_getContext(static_cast<int>(source_size.first), static_cast<int>(source_size.second), source_format, static_cast<int>(target_size.first), static_cast<int>(target_size.second), target_format, flags, nullptr, nullptr, nullptr);
if (!this->context) {
return false;
}
// Configure conversion coefficients and ranges.
// NOTE(review): the last three arguments are brightness, contrast and
// saturation in 16.16 fixed point; FFmpeg's neutral values are 0, 1<<16 and
// 1<<16 respectively. All three are passed as (1L << 16 | 0L) here — the
// "| 0L" is a no-op, and a brightness of 1<<16 is a non-zero offset.
// Confirm against libswscale whether brightness should be 0 instead.
sws_setColorspaceDetails(this->context, sws_getCoefficients(source_colorspace), source_full_range ? 1 : 0, sws_getCoefficients(target_colorspace), target_full_range ? 1 : 0, 1L << 16 | 0L, 1L << 16 | 0L, 1L << 16 | 0L);
return true;
}
bool swscale::finalize()
{
	// Tear down the scaler context if one exists. Returns true when a context
	// was actually freed, false when there was nothing to do.
	if (!this->context) {
		return false;
	}
	sws_freeContext(this->context);
	this->context = nullptr;
	return true;
}

int32_t swscale::convert(const uint8_t* const source_data[], const int source_stride[], int32_t source_row, int32_t source_rows, uint8_t* const target_data[], const int target_stride[])
{
	// Convert a slice of 'source_rows' rows starting at 'source_row'. Without
	// an initialized context there is nothing to do and 0 rows are written.
	if (!this->context) {
		return 0;
	}
	// sws_scale returns the height of the slice written to the target.
	return sws_scale(this->context, source_data, source_stride, source_row, source_rows, target_data, target_stride);
}

View File

@ -0,0 +1,68 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "warning-disable.hpp"
#include <utility>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>
#include "warning-enable.hpp"
}
namespace streamfx::ffmpeg {
class swscale {
std::pair<uint32_t, uint32_t> source_size;
AVPixelFormat source_format = AV_PIX_FMT_NONE;
bool source_full_range = false;
AVColorSpace source_colorspace = AVCOL_SPC_UNSPECIFIED;
std::pair<uint32_t, uint32_t> target_size;
AVPixelFormat target_format = AV_PIX_FMT_NONE;
bool target_full_range = false;
AVColorSpace target_colorspace = AVCOL_SPC_UNSPECIFIED;
SwsContext* context = nullptr;
public:
swscale();
~swscale();
void set_source_size(uint32_t width, uint32_t height);
void get_source_size(uint32_t& width, uint32_t& height);
std::pair<uint32_t, uint32_t> get_source_size();
uint32_t get_source_width();
uint32_t get_source_height();
void set_source_format(AVPixelFormat format);
AVPixelFormat get_source_format();
void set_source_color(bool full_range, AVColorSpace space);
void set_source_colorspace(AVColorSpace space);
AVColorSpace get_source_colorspace();
void set_source_full_range(bool full_range);
bool is_source_full_range();
void set_target_size(uint32_t width, uint32_t height);
void get_target_size(uint32_t& width, uint32_t& height);
std::pair<uint32_t, uint32_t> get_target_size();
uint32_t get_target_width();
uint32_t get_target_height();
void set_target_format(AVPixelFormat format);
AVPixelFormat get_target_format();
void set_target_color(bool full_range, AVColorSpace space);
void set_target_colorspace(AVColorSpace space);
AVColorSpace get_target_colorspace();
void set_target_full_range(bool full_range);
bool is_target_full_range();
bool initialize(int flags);
bool finalize();
int32_t convert(const uint8_t* const source_data[], const int source_stride[], int32_t source_row, int32_t source_rows, uint8_t* const target_data[], const int target_stride[]);
};
} // namespace streamfx::ffmpeg

View File

@ -0,0 +1,463 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "tools.hpp"
#include "plugin.hpp"
#include "warning-disable.hpp"
#include <list>
#include <sstream>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include "warning-enable.hpp"
}
using namespace streamfx::ffmpeg;
// Resolve an AVPixelFormat to FFmpeg's canonical name (nullptr if unknown).
const char* tools::get_pixel_format_name(AVPixelFormat v)
{
return av_get_pix_fmt_name(v);
}
// Human-readable label for an AVColorSpace value.
const char* tools::get_color_space_name(AVColorSpace v)
{
switch (v) {
case AVCOL_SPC_RGB:
return "RGB";
case AVCOL_SPC_BT709:
return "BT.709";
case AVCOL_SPC_FCC:
return "FCC Title 47 CoFR 73.682 (a)(20)";
case AVCOL_SPC_BT470BG:
return "BT.601 625";
// Both SMPTE variants are reported under the same 525-line BT.601 label.
case AVCOL_SPC_SMPTE170M:
case AVCOL_SPC_SMPTE240M:
return "BT.601 525";
case AVCOL_SPC_YCGCO:
return "ITU-T SG16";
case AVCOL_SPC_BT2020_NCL:
return "BT.2020 NCL";
case AVCOL_SPC_BT2020_CL:
return "BT.2020 CL";
case AVCOL_SPC_SMPTE2085:
return "SMPTE 2085";
case AVCOL_SPC_CHROMA_DERIVED_NCL:
return "Chroma NCL";
case AVCOL_SPC_CHROMA_DERIVED_CL:
return "Chroma CL";
case AVCOL_SPC_ICTCP:
return "BT.2100";
case AVCOL_SPC_NB:
return "Not Part of ABI";
default:
return "Unknown";
}
}
// Translate an FFmpeg error code into a printable message. The buffer is
// thread_local, so the returned pointer remains valid on the calling thread
// until this function is called again on the same thread.
const char* tools::get_error_description(int error)
{
thread_local char error_buf[AV_ERROR_MAX_STRING_SIZE + 1];
// av_strerror fails for unknown codes; fall back to printing the raw number.
if (av_strerror(error, error_buf, AV_ERROR_MAX_STRING_SIZE) < 0) {
snprintf(error_buf, AV_ERROR_MAX_STRING_SIZE, "Unknown Error (%i)", error);
}
return error_buf;
}
// Mapping of OBS video formats to their closest FFmpeg pixel format.
// (Comments corrected: YVYU/YUY2/UYVY are 4:2:2; the RGB family is packed.)
static std::map<video_format, AVPixelFormat> const obs_to_av_format_map = {
{VIDEO_FORMAT_I420, AV_PIX_FMT_YUV420P}, // 4:2:0 YUV, 8bit, Planar
{VIDEO_FORMAT_NV12, AV_PIX_FMT_NV12}, // 4:2:0 YUV, 8bit, Semi-Planar (Y + interleaved UV)
{VIDEO_FORMAT_YVYU, AV_PIX_FMT_YVYU422}, // 4:2:2 YUV, 8bit, Packed
{VIDEO_FORMAT_YUY2, AV_PIX_FMT_YUYV422}, // 4:2:2 YUV, 8bit, Packed
{VIDEO_FORMAT_UYVY, AV_PIX_FMT_UYVY422}, // 4:2:2 YUV, 8bit, Packed
{VIDEO_FORMAT_RGBA, AV_PIX_FMT_RGBA}, // 4:4:4:4 RGBA, 8bit, Packed
{VIDEO_FORMAT_BGRA, AV_PIX_FMT_BGRA}, // 4:4:4:4 BGRA, 8bit, Packed
{VIDEO_FORMAT_BGRX, AV_PIX_FMT_BGR0}, // 4:4:4 BGR, 8bit, Packed (X byte ignored)
{VIDEO_FORMAT_Y800, AV_PIX_FMT_GRAY8}, // 4:0:0 Y, 8bit, Planar
{VIDEO_FORMAT_I444, AV_PIX_FMT_YUV444P}, // 4:4:4 YUV, 8bit, Planar
{VIDEO_FORMAT_BGR3, AV_PIX_FMT_BGR24}, // 4:4:4 BGR, 8bit, Packed
{VIDEO_FORMAT_I422, AV_PIX_FMT_YUV422P}, // 4:2:2 YUV, 8bit, Planar
{VIDEO_FORMAT_I40A, AV_PIX_FMT_YUVA420P}, // 4:2:0:4 YUVA, 8bit, Planar
{VIDEO_FORMAT_I42A, AV_PIX_FMT_YUVA422P}, // 4:2:2:4 YUVA, 8bit, Planar
{VIDEO_FORMAT_YUVA, AV_PIX_FMT_YUVA444P}, // 4:4:4:4 YUVA, 8bit, Planar
{VIDEO_FORMAT_AYUV, AV_PIX_FMT_NONE}, // No compatible format known
{VIDEO_FORMAT_I010, AV_PIX_FMT_YUV420P10}, // 4:2:0 YUV, 10bit, Planar
{VIDEO_FORMAT_P010, AV_PIX_FMT_P010}, // 4:2:0 YUV, 10bit, Semi-Planar (Y + interleaved UV)
{VIDEO_FORMAT_I210, AV_PIX_FMT_YUV422P10}, // 4:2:2 YUV, 10bit, Planar
{VIDEO_FORMAT_I412, AV_PIX_FMT_YUV444P12}, // 4:4:4 YUV, 12bit, Planar
{VIDEO_FORMAT_YA2L, AV_PIX_FMT_YUVA444P12}, // 4:4:4:4 YUVA, 12bit, Planar
};
AVPixelFormat tools::obs_videoformat_to_avpixelformat(video_format v)
{
	// Forward lookup through the shared OBS <-> FFmpeg format table.
	if (auto entry = obs_to_av_format_map.find(v); entry != obs_to_av_format_map.end()) {
		return entry->second;
	}
	return AV_PIX_FMT_NONE;
}

video_format tools::avpixelformat_to_obs_videoformat(AVPixelFormat v)
{
	// Reverse lookup: return the first OBS format whose mapping matches 'v'.
	for (const auto& [obs_format, av_format] : obs_to_av_format_map) {
		if (av_format == v) {
			return obs_format;
		}
	}
	return VIDEO_FORMAT_NONE;
}
// Pick the entry from the AV_PIX_FMT_NONE-terminated 'haystack' that loses
// the least information when converting from 'needle'.
AVPixelFormat tools::get_least_lossy_format(const AVPixelFormat* haystack, AVPixelFormat needle)
{
int data_loss = 0;
return avcodec_find_best_pix_fmt_of_list(haystack, needle, 0, &data_loss);
}
// Map an OBS video range to the FFmpeg color range; throws on unknown values.
AVColorRange tools::obs_to_av_color_range(video_range_type v)
{
switch (v) {
case VIDEO_RANGE_DEFAULT:
case VIDEO_RANGE_PARTIAL:
return AVCOL_RANGE_MPEG;
case VIDEO_RANGE_FULL:
return AVCOL_RANGE_JPEG;
}
throw std::invalid_argument("Unknown Color Range");
}
// Map an OBS colorspace to the FFmpeg colorspace; throws on unknown values.
AVColorSpace tools::obs_to_av_color_space(video_colorspace v)
{
switch (v) {
case VIDEO_CS_601: // BT.601
return AVCOL_SPC_SMPTE170M;
case VIDEO_CS_DEFAULT:
case VIDEO_CS_709: // BT.709
case VIDEO_CS_SRGB: // sRGB
return AVCOL_SPC_BT709;
case VIDEO_CS_2100_PQ:
case VIDEO_CS_2100_HLG:
return AVCOL_SPC_ICTCP;
default:
throw std::invalid_argument("Unknown Color Space");
}
}
// Map an OBS colorspace to FFmpeg color primaries; throws on unknown values.
AVColorPrimaries streamfx::ffmpeg::tools::obs_to_av_color_primary(video_colorspace v)
{
switch (v) {
case VIDEO_CS_601: // BT.601
return AVCOL_PRI_SMPTE170M;
case VIDEO_CS_DEFAULT:
case VIDEO_CS_709: // BT.709
case VIDEO_CS_SRGB: // sRGB
return AVCOL_PRI_BT709;
case VIDEO_CS_2100_PQ:
case VIDEO_CS_2100_HLG:
return AVCOL_PRI_BT2020;
default:
throw std::invalid_argument("Unknown Color Primaries");
}
}
// Map an OBS colorspace to the FFmpeg transfer function; throws on unknown values.
AVColorTransferCharacteristic streamfx::ffmpeg::tools::obs_to_av_color_transfer_characteristics(video_colorspace v)
{
switch (v) {
case VIDEO_CS_601: // BT.601
return AVCOL_TRC_SMPTE170M;
case VIDEO_CS_DEFAULT:
case VIDEO_CS_709: // BT.709
return AVCOL_TRC_BT709;
case VIDEO_CS_SRGB: // sRGB with IEC 61966-2-1
return AVCOL_TRC_IEC61966_2_1;
case VIDEO_CS_2100_PQ:
return AVCOL_TRC_SMPTE2084;
case VIDEO_CS_2100_HLG:
return AVCOL_TRC_ARIB_STD_B67;
default:
throw std::invalid_argument("Unknown Color Transfer Characteristics");
}
}
const char* tools::avoption_name_from_unit_value(const AVClass* cls, std::string_view unit, int64_t value)
{
	// Walk every option registered on the class and return the name of the
	// named constant that belongs to 'unit' and carries 'value'. Returns
	// nullptr when no matching constant exists.
	const AVOption* opt = nullptr;
	while ((opt = av_opt_next(&cls, opt)) != nullptr) {
		if (!opt->unit) {
			continue; // Not part of any unit.
		}
		if (opt->unit != unit) {
			continue; // Belongs to a different unit.
		}
		if (opt->name == unit) {
			continue; // Skip the option that declares the unit itself.
		}
		if (opt->default_val.i64 == value) {
			return opt->name;
		}
	}
	return nullptr;
}

bool tools::avoption_exists(const void* obj, std::string_view name)
{
	// Linear scan over all options exposed by 'obj'.
	const AVOption* opt = nullptr;
	while ((opt = av_opt_next(obj, opt)) != nullptr) {
		if (name == opt->name) {
			return true;
		}
	}
	return false;
}
// Enumerate all named constants that belong to the given unit on 'obj' and
// pass each one to 'inserter'. 'unit' may also be the name of an option, in
// which case that option's declared unit is used instead. Deprecated entries
// are skipped.
void tools::avoption_list_add_entries(const void* obj, std::string_view unit, std::function<void(const AVOption*)> inserter)
{
std::string_view parent_name = unit;
std::string_view parent_unit = unit;
// Figure out the real unit if this is actually an option name.
const AVOption* parent = av_opt_find(const_cast<void*>(obj), unit.data(), nullptr, 0, AV_OPT_SEARCH_CHILDREN);
if (parent != nullptr) {
parent_name = parent->name;
if (parent->unit != nullptr) {
parent_unit = parent->unit;
}
}
for (const AVOption* opt = nullptr; (opt = av_opt_next(obj, opt)) != nullptr;) {
// Skip all irrelevant options.
if (!opt->unit)
continue;
if (opt->unit != parent_unit)
continue;
if (opt->name == parent_name)
continue;
// Skip any deprecated options.
if (opt->flags & AV_OPT_FLAG_DEPRECATED)
continue;
if (inserter) {
inserter(opt);
} else {
// Without an inserter there is nothing to do for further matches.
break;
}
}
}
bool tools::can_hardware_encode(const AVCodec* codec)
{
AVPixelFormat hardware_formats[] = {AV_PIX_FMT_D3D11};
for (const AVPixelFormat* fmt = codec->pix_fmts; (fmt != nullptr) && (*fmt != AV_PIX_FMT_NONE); fmt++) {
for (auto cmp : hardware_formats) {
if (*fmt == cmp) {
return true;
}
}
}
return false;
}
std::vector<AVPixelFormat> tools::get_software_formats(const AVPixelFormat* list)
{
constexpr AVPixelFormat hardware_formats[] = {
#if FF_API_VAAPI
AV_PIX_FMT_VAAPI_MOCO,
AV_PIX_FMT_VAAPI_IDCT,
#endif
AV_PIX_FMT_VAAPI,
AV_PIX_FMT_DXVA2_VLD,
AV_PIX_FMT_VDPAU,
AV_PIX_FMT_QSV,
AV_PIX_FMT_MMAL,
AV_PIX_FMT_D3D11VA_VLD,
AV_PIX_FMT_CUDA,
AV_PIX_FMT_XVMC,
AV_PIX_FMT_VIDEOTOOLBOX,
AV_PIX_FMT_MEDIACODEC,
AV_PIX_FMT_D3D11,
};
std::vector<AVPixelFormat> fmts;
for (auto fmt = list; fmt && (*fmt != AV_PIX_FMT_NONE); fmt++) {
bool is_blacklisted = false;
for (auto blacklisted : hardware_formats) {
if (*fmt == blacklisted)
is_blacklisted = true;
}
if (!is_blacklisted)
fmts.push_back(*fmt);
}
fmts.push_back(AV_PIX_FMT_NONE);
return fmts;
}
// Copy resolution, framerate, aspect ratio, color and chroma information
// from the OBS video output descriptor onto an FFmpeg codec context.
void tools::context_setup_from_obs(const video_output_info* voi, AVCodecContext* context)
{
// Resolution
context->width = static_cast<int>(voi->width);
context->height = static_cast<int>(voi->height);
// Framerate: framerate is fps_num/fps_den, time_base is its reciprocal.
context->ticks_per_frame = 1;
context->framerate.num = context->time_base.den = static_cast<int>(voi->fps_num);
context->framerate.den = context->time_base.num = static_cast<int>(voi->fps_den);
// Aspect Ratio, Progressive
context->sample_aspect_ratio.num = 1;
context->sample_aspect_ratio.den = 1;
context->field_order = AV_FIELD_PROGRESSIVE;
// Decipher Pixel information
context->pix_fmt = obs_videoformat_to_avpixelformat(voi->format);
context->color_range = obs_to_av_color_range(voi->range);
context->colorspace = obs_to_av_color_space(voi->colorspace);
context->color_primaries = obs_to_av_color_primary(voi->colorspace);
context->color_trc = obs_to_av_color_transfer_characteristics(voi->colorspace);
// Chroma Location
switch (context->pix_fmt) {
case AV_PIX_FMT_NV12:
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUVA420P:
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUVA422P:
case AV_PIX_FMT_YVYU422:
case AV_PIX_FMT_YUYV422:
case AV_PIX_FMT_UYVY422:
// libOBS merges Chroma at "Top", see H.264 specification.
context->chroma_sample_location = AVCHROMA_LOC_TOP;
break;
default:
// All other cases are unspecified.
context->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
break;
}
}
const char* tools::get_std_compliance_name(int compliance)
{
	// Map FF_COMPLIANCE_* constants to a human readable label.
	switch (compliance) {
	case FF_COMPLIANCE_EXPERIMENTAL:
		return "Experimental";
	case FF_COMPLIANCE_UNOFFICIAL:
		return "Unofficial";
	case FF_COMPLIANCE_NORMAL:
		return "Normal";
	case FF_COMPLIANCE_STRICT:
		return "Strict";
	case FF_COMPLIANCE_VERY_STRICT:
		return "Very Strict";
	default:
		return "Invalid";
	}
}

const char* tools::get_thread_type_name(int thread_type)
{
	// Describe which exact FFmpeg threading mode 'thread_type' selects.
	if (thread_type == (FF_THREAD_FRAME | FF_THREAD_SLICE)) {
		return "Slice & Frame";
	}
	if (thread_type == FF_THREAD_FRAME) {
		return "Frame";
	}
	if (thread_type == FF_THREAD_SLICE) {
		return "Slice";
	}
	return "None";
}
// Log a boolean codec option read from the codec context itself.
void tools::print_av_option_bool(AVCodecContext* ctx_codec, const char* option, std::string_view text, bool inverse)
{
print_av_option_bool(ctx_codec, ctx_codec, option, text, inverse);
}
// Log a boolean option read from 'ctx_option', attributed to 'ctx_codec'.
// 'inverse' swaps the Enabled/Disabled labels for inverted-sense options.
void tools::print_av_option_bool(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, bool inverse)
{
int64_t v = 0;
if (int err = av_opt_get_int(ctx_option, option, AV_OPT_SEARCH_CHILDREN, &v); err != 0) {
DLOG_INFO("[%s] %s: <Error: %s>", ctx_codec->codec->name, text.data(), streamfx::ffmpeg::tools::get_error_description(err));
} else {
// Append " <Default>" when the option still carries its default value.
DLOG_INFO("[%s] %s: %s%s", ctx_codec->codec->name, text.data(), (inverse ? v != 0 : v == 0) ? "Disabled" : "Enabled", av_opt_is_set_to_default_by_name(ctx_option, option, AV_OPT_SEARCH_CHILDREN) > 0 ? " <Default>" : "");
}
}
// Log an integer codec option read from the codec context itself.
void tools::print_av_option_int(AVCodecContext* ctx_codec, const char* option, std::string_view text, std::string_view suffix)
{
print_av_option_int(ctx_codec, ctx_codec, option, text, suffix);
}
// Log an integer option read from 'ctx_option' with a unit 'suffix',
// attributed to 'ctx_codec'.
void tools::print_av_option_int(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, std::string_view suffix)
{
int64_t v = 0;
bool is_default = av_opt_is_set_to_default_by_name(ctx_option, option, AV_OPT_SEARCH_CHILDREN) > 0;
if (int err = av_opt_get_int(ctx_option, option, AV_OPT_SEARCH_CHILDREN, &v); err != 0) {
// Reading can fail for unset defaulted options; log that distinctly.
if (is_default) {
DLOG_INFO("[%s] %s: <Default>", ctx_codec->codec->name, text.data());
} else {
DLOG_INFO("[%s] %s: <Error: %s>", ctx_codec->codec->name, text.data(), streamfx::ffmpeg::tools::get_error_description(err));
}
} else {
DLOG_INFO("[%s] %s: %" PRId64 " %s%s", ctx_codec->codec->name, text.data(), v, suffix.data(), is_default ? " <Default>" : "");
}
}
// Log an enumerated codec option, decoded to text by 'decoder'.
void tools::print_av_option_string(AVCodecContext* ctx_codec, const char* option, std::string_view text, std::function<std::string(int64_t)> decoder)
{
print_av_option_string(ctx_codec, ctx_codec, option, text, decoder);
}
// Log an enumerated option read from 'ctx_option'; 'decoder' maps the raw
// integer value to a display string.
void tools::print_av_option_string(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, std::function<std::string(int64_t)> decoder)
{
int64_t v = 0;
if (int err = av_opt_get_int(ctx_option, option, AV_OPT_SEARCH_CHILDREN, &v); err != 0) {
DLOG_INFO("[%s] %s: <Error: %s>", ctx_codec->codec->name, text.data(), streamfx::ffmpeg::tools::get_error_description(err));
} else {
std::string name = "<Unknown>";
if (decoder)
name = decoder(v);
DLOG_INFO("[%s] %s: %s%s", ctx_codec->codec->name, text.data(), name.c_str(), av_opt_is_set_to_default_by_name(ctx_option, option, AV_OPT_SEARCH_CHILDREN) > 0 ? " <Default>" : "");
}
}
// Log an enumerated codec option, resolving the value's name through the
// option's own AVOption unit table.
void tools::print_av_option_string2(AVCodecContext* ctx_codec, std::string_view option, std::string_view text, std::function<std::string(int64_t, std::string_view)> decoder)
{
print_av_option_string2(ctx_codec, ctx_codec, option, text, decoder);
}
// Log an enumerated option read from 'ctx_option'. The raw integer value is
// matched against the named constants of the option's unit; 'decoder' (if
// given) may post-process the resolved name. Options without a unit are
// logged as a plain integer.
void tools::print_av_option_string2(AVCodecContext* ctx_codec, void* ctx_option, std::string_view option, std::string_view text, std::function<std::string(int64_t, std::string_view)> decoder)
{
int64_t v = 0;
if (int err = av_opt_get_int(ctx_option, option.data(), AV_OPT_SEARCH_CHILDREN, &v); err != 0) {
DLOG_INFO("[%s] %s: <Error: %s>", ctx_codec->codec->name, text.data(), tools::get_error_description(err));
} else {
std::string name = "<Unknown>";
// Find the unit for the option.
auto* opt = av_opt_find(ctx_option, option.data(), nullptr, 0, AV_OPT_SEARCH_CHILDREN);
if (opt && opt->unit) {
// Scan the remaining options for a constant in the same unit whose
// value matches the one we read.
for (auto* opt_test = opt; (opt_test = av_opt_next(ctx_option, opt_test)) != nullptr;) {
// Skip this entry if the unit doesn't match.
if ((opt_test->unit == nullptr) || (strcmp(opt_test->unit, opt->unit) != 0)) {
continue;
}
// Assign correct name if we found one.
if (opt_test->default_val.i64 == v) {
name = opt_test->name;
break;
}
}
if (decoder) {
name = decoder(v, name);
}
DLOG_INFO("[%s] %s: %s%s", ctx_codec->codec->name, text.data(), name.c_str(), av_opt_is_set_to_default_by_name(ctx_option, option.data(), AV_OPT_SEARCH_CHILDREN) > 0 ? " <Default>" : "");
} else {
// No unit information available; log the raw integer value.
DLOG_INFO("[%s] %s: %" PRId64 "%s", ctx_codec->codec->name, text.data(), v, av_opt_is_set_to_default_by_name(ctx_option, option.data(), AV_OPT_SEARCH_CHILDREN) > 0 ? " <Default>" : "");
}
}
}

View File

@ -0,0 +1,66 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "warning-disable.hpp"
#include <functional>
#include "warning-enable.hpp"
extern "C" {
#include "warning-disable.hpp"
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>
#include "warning-enable.hpp"
}
namespace streamfx::ffmpeg::tools {
const char* get_pixel_format_name(AVPixelFormat v);
const char* get_color_space_name(AVColorSpace v);
const char* get_error_description(int error);
AVPixelFormat obs_videoformat_to_avpixelformat(video_format v);
video_format avpixelformat_to_obs_videoformat(AVPixelFormat v);
AVPixelFormat get_least_lossy_format(const AVPixelFormat* haystack, AVPixelFormat needle);
AVColorRange obs_to_av_color_range(video_range_type v);
AVColorSpace obs_to_av_color_space(video_colorspace v);
AVColorPrimaries obs_to_av_color_primary(video_colorspace v);
AVColorTransferCharacteristic obs_to_av_color_transfer_characteristics(video_colorspace v);
bool can_hardware_encode(const AVCodec* codec);
std::vector<AVPixelFormat> get_software_formats(const AVPixelFormat* list);
void context_setup_from_obs(const video_output_info* voi, AVCodecContext* context);
const char* get_std_compliance_name(int compliance);
const char* get_thread_type_name(int thread_type);
void print_av_option_bool(AVCodecContext* context, const char* option, std::string_view text, bool inverse = false);
void print_av_option_bool(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, bool inverse = false);
void print_av_option_int(AVCodecContext* context, const char* option, std::string_view text, std::string_view suffix);
void print_av_option_int(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, std::string_view suffix);
void print_av_option_string(AVCodecContext* context, const char* option, std::string_view text, std::function<std::string(int64_t)> decoder);
void print_av_option_string(AVCodecContext* ctx_codec, void* ctx_option, const char* option, std::string_view text, std::function<std::string(int64_t)> decoder);
void print_av_option_string2(AVCodecContext* context, std::string_view option, std::string_view text, std::function<std::string(int64_t, std::string_view)> decoder);
void print_av_option_string2(AVCodecContext* ctx_codec, void* ctx_option, std::string_view option, std::string_view text, std::function<std::string(int64_t, std::string_view)> decoder);
bool avoption_exists(const void* obj, std::string_view name);
const char* avoption_name_from_unit_value(const AVClass* cls, std::string_view unit, int64_t value);
void avoption_list_add_entries(const void* obj, std::string_view unit, std::function<void(const AVOption*)> inserter = nullptr);
} // namespace streamfx::ffmpeg::tools

View File

@ -0,0 +1,9 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
# Registers the "Mirror" source component with the StreamFX build system.
cmake_minimum_required(VERSION 3.26)
project("Mirror")
# Prefix all CMake messages from this component for readable logs.
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")
streamfx_add_component("Mirror")

View File

@ -0,0 +1,404 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#include "source-mirror.hpp"
#include "strings.hpp"
#include <bitset>
#include <cstring>
#include <functional>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <vector>
#include "obs/gs/gs-helper.hpp"
#include "obs/obs-source-tracker.hpp"
#include "obs/obs-tools.hpp"
#include "util/util-logging.hpp"
#ifdef _DEBUG
#define ST_PREFIX "<%s> "
#define D_LOG_ERROR(x, ...) P_LOG_ERROR(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_WARNING(x, ...) P_LOG_WARN(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_INFO(x, ...) P_LOG_INFO(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#define D_LOG_DEBUG(x, ...) P_LOG_DEBUG(ST_PREFIX##x, __FUNCTION_SIG__, __VA_ARGS__)
#else
#define ST_PREFIX "<source::mirror> "
#define D_LOG_ERROR(...) P_LOG_ERROR(ST_PREFIX __VA_ARGS__)
#define D_LOG_WARNING(...) P_LOG_WARN(ST_PREFIX __VA_ARGS__)
#define D_LOG_INFO(...) P_LOG_INFO(ST_PREFIX __VA_ARGS__)
#define D_LOG_DEBUG(...) P_LOG_DEBUG(ST_PREFIX __VA_ARGS__)
#endif
// OBS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4464)
#pragma warning(disable : 4820)
#pragma warning(disable : 5220)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#endif
#include <media-io/audio-io.h>
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
#define ST_I18N "Source.Mirror"
#define ST_I18N_SOURCE ST_I18N ".Source"
#define ST_KEY_SOURCE "Source.Mirror.Source"
#define ST_I18N_SOURCE_AUDIO ST_I18N_SOURCE ".Audio"
#define ST_KEY_SOURCE_AUDIO "Source.Mirror.Audio"
#define ST_I18N_SOURCE_AUDIO_LAYOUT ST_I18N_SOURCE_AUDIO ".Layout"
#define ST_KEY_SOURCE_AUDIO_LAYOUT "Source.Mirror.Audio.Layout"
#define ST_I18N_SOURCE_AUDIO_LAYOUT_(x) ST_I18N_SOURCE_AUDIO_LAYOUT "." D_VSTR(x)
using namespace streamfx::source::mirror;
static constexpr std::string_view HELP_URL = "https://github.com/Xaymar/obs-StreamFX/wiki/Source-Mirror";
// Deep-copies an OBS audio packet so it can outlive the originating callback.
// 'layout' overrides the speaker layout; sample format and rate are taken
// from the current global audio output.
mirror_audio_data::mirror_audio_data(const audio_data* audio, speaker_layout layout)
{
// Build a clone of a packet.
audio_t* oad = obs_get_audio();
const audio_output_info* aoi = audio_output_get_info(oad);
osa.frames = audio->frames;
osa.timestamp = audio->timestamp;
osa.speakers = layout;
osa.format = aoi->format;
osa.samples_per_sec = aoi->samples_per_sec;
data.resize(MAX_AV_PLANES);
for (std::size_t idx = 0; idx < MAX_AV_PLANES; idx++) {
// Planes absent in the source stay null in the clone.
if (!audio->data[idx]) {
osa.data[idx] = nullptr;
continue;
}
// Copy the plane's samples into owned storage and point the packet at it.
data[idx].resize(audio->frames * get_audio_bytes_per_channel(osa.format));
memcpy(data[idx].data(), audio->data[idx], data[idx].size());
osa.data[idx] = data[idx].data();
}
}
// Construct the mirror and immediately apply the provided settings.
mirror_instance::mirror_instance(obs_data_t* settings, obs_source_t* self) : obs::source_instance(settings, self), _source(), _source_child(), _signal_rename(), _audio_enabled(false), _audio_layout(SPEAKERS_UNKNOWN)
{
update(settings);
}
// Drop all references to the mirrored source before destruction.
mirror_instance::~mirror_instance()
{
release();
}
// Reported width; falls back to 1 so libOBS never sees a zero-sized source.
uint32_t mirror_instance::get_width()
{
return _source_size.first ? _source_size.first : 1;
}
// Reported height; falls back to 1 so libOBS never sees a zero-sized source.
uint32_t mirror_instance::get_height()
{
return _source_size.second ? _source_size.second : 1;
}
// Loading saved settings is identical to a live settings update.
void mirror_instance::load(obs_data_t* data)
{
update(data);
}
// Upgrade settings written by older plugin versions to the current schema.
void mirror_instance::migrate(obs_data_t* data, uint64_t version)
{
switch (version) {
case 0:
// Move the audio layout value onto its current key.
// NOTE(review): ST_KEY_SOURCE_AUDIO_LAYOUT expands to the literal
// "Source.Mirror.Audio.Layout" (see the #define above) — i.e. the same
// key that is unset right after being set, which would discard the
// value. Confirm the legacy key name this migration is meant to read.
obs_data_set_int(data, ST_KEY_SOURCE_AUDIO_LAYOUT, obs_data_get_int(data, "Source.Mirror.Audio.Layout"));
obs_data_unset_user_value(data, "Source.Mirror.Audio.Layout");
// Intentional fall-through: migrations are cumulative.
case STREAMFX_VERSION:
break;
}
}
// Apply settings: audio options first, then (re-)acquire the mirrored source.
void mirror_instance::update(obs_data_t* data)
{
// Audio
_audio_enabled = obs_data_get_bool(data, ST_KEY_SOURCE_AUDIO);
_audio_layout = static_cast<speaker_layout>(obs_data_get_int(data, ST_KEY_SOURCE_AUDIO_LAYOUT));
// Acquire new source.
acquire(obs_data_get_string(data, ST_KEY_SOURCE));
}
// Persist the mirrored source by name (sources may be renamed between loads).
void mirror_instance::save(obs_data_t* data)
{
if (_source) {
obs_data_set_string(data, ST_KEY_SOURCE, obs_source_get_name(_source.get()));
} else {
obs_data_unset_user_value(data, ST_KEY_SOURCE);
}
}
// No per-frame bookkeeping is required for mirrored rendering.
void mirror_instance::video_tick(float time) {}
// Render the mirrored source directly, caching its current dimensions for
// get_width()/get_height().
void mirror_instance::video_render(gs_effect_t* effect)
{
if (!_source)
return;
// Only render sources that actually output video.
if ((obs_source_get_output_flags(_source.get()) & OBS_SOURCE_VIDEO) == 0)
return;
#if defined(ENABLE_PROFILING) && !defined(D_PLATFORM_MAC) && _DEBUG
streamfx::obs::gs::debug_marker gdmp{streamfx::obs::gs::debug_color_source, "Source Mirror '%s' for '%s'", obs_source_get_name(_self), obs_source_get_name(_source.get())};
#endif
// Remember the current size for the next get_width()/get_height() query.
_source_size.first = obs_source_get_width(_source.get());
_source_size.second = obs_source_get_height(_source.get());
obs_source_video_render(_source.get());
}
void mirror_instance::enum_active_sources(obs_source_enum_proc_t cb, void* ptr)
{
	// Report the mirrored source (if any) as an active child of this source.
	if (_source) {
		cb(_self, _source.get(), ptr);
	}
}

void mirror_instance::enum_all_sources(obs_source_enum_proc_t cb, void* ptr)
{
	// Report the mirrored source (if any) as a child of this source.
	if (_source) {
		cb(_self, _source.get(), ptr);
	}
}
// Switch the mirrored target to the source named 'source_name'. Does nothing
// when the source cannot be found or would mirror ourselves; any exception
// during acquisition results in a clean release instead.
void mirror_instance::acquire(std::string source_name)
{
try {
release();
// Find source by name if possible.
decltype(_source) source{source_name};
if ((!source) || (source == _self)) { // If we failed, just exit early.
return;
}
// Everything went well, store.
_source_child = std::make_shared<::streamfx::obs::source_active_child>(_self, source);
_source = std::move(source);
_source_size.first = obs_source_get_width(_source);
_source_size.second = obs_source_get_height(_source);
// Listen to any audio the source spews out.
if (_audio_enabled) {
_signal_audio = std::make_shared<obs::audio_signal_handler>(_source);
_signal_audio->event.add(std::bind(&mirror_instance::on_audio, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
}
} catch (...) {
// Deliberate catch-all: a failed acquire leaves the mirror empty.
release();
}
}
// Drop all references to the mirrored source, signal handlers first so no
// callbacks fire against a half-released state.
void mirror_instance::release()
{
_signal_audio.reset();
_signal_rename.reset();
_source_child.reset();
_source.release();
}
// Audio callback from the mirrored source: determines the speaker layout,
// queues a deep copy of the packet, and schedules asynchronous output.
void mirror_instance::on_audio(::streamfx::obs::source, const audio_data* audio, bool)
{
// Immediately quit if there isn't any actual audio to send out.
if (!_audio_enabled) {
return;
}
// Detect Audio Layout from underlying audio.
speaker_layout detected_layout;
if (_audio_layout != SPEAKERS_UNKNOWN) {
// The user forced a specific layout in the settings.
detected_layout = _audio_layout;
} else {
// Guess the layout from which audio planes actually carry data.
std::bitset<MAX_AV_PLANES> layout_detection;
for (std::size_t idx = 0; idx < MAX_AV_PLANES; idx++) {
layout_detection.set(idx, audio->data[idx] != nullptr);
}
switch (layout_detection.to_ulong()) {
case 0b00000001:
detected_layout = SPEAKERS_MONO;
break;
case 0b00000011:
detected_layout = SPEAKERS_STEREO;
break;
case 0b00000111:
detected_layout = SPEAKERS_2POINT1;
break;
case 0b00001111:
detected_layout = SPEAKERS_4POINT0;
break;
case 0b00011111:
detected_layout = SPEAKERS_4POINT1;
break;
case 0b00111111:
detected_layout = SPEAKERS_5POINT1;
break;
case 0b11111111:
detected_layout = SPEAKERS_7POINT1;
break;
default:
detected_layout = SPEAKERS_UNKNOWN;
break;
}
}
{
// Queue a deep copy of the packet under the lock (mirror_audio_data clones the planes).
std::unique_lock<std::mutex> ul(_audio_queue_lock);
_audio_queue.emplace(audio, detected_layout);
}
// Create a clone of the audio data and push it to the thread pool.
streamfx::util::threadpool::threadpool::instance()->push(std::bind(&mirror_instance::audio_output, this, std::placeholders::_1), nullptr);
}
// Thread-pool task: flush every queued audio packet to OBS.
// 'data' is the unused opaque payload required by the thread-pool signature.
void mirror_instance::audio_output(std::shared_ptr<void> data)
{
	std::unique_lock<std::mutex> guard(_audio_queue_lock);
	while (!_audio_queue.empty()) {
		obs_source_output_audio(_self, &(_audio_queue.front().osa));
		_audio_queue.pop();
	}
}
mirror_factory::mirror_factory()
{
	// Register as a video input source that draws itself and emits audio.
	_info.id = S_PREFIX "source-mirror";
	_info.type = OBS_SOURCE_TYPE_INPUT;
	_info.output_flags = OBS_SOURCE_VIDEO | OBS_SOURCE_CUSTOM_DRAW | OBS_SOURCE_AUDIO;
	// Mirrored sources must be enumerable as (active) child sources so OBS
	// can propagate activity/visibility through this source.
	support_active_child_sources(true);
	support_child_sources(true);
	finish_setup();
	// Accept the legacy identifier from the obs-stream-effects era.
	register_proxy("obs-stream-effects-source-mirror");
}
// Defaulted: the factory owns nothing that needs manual teardown.
mirror_factory::~mirror_factory() = default;
// Localized display name shown in OBS's "Add Source" list.
const char* mirror_factory::get_name()
{
	return D_TRANSLATE(ST_I18N);
}
// Default settings: no source selected, audio mirroring disabled, and the
// speaker layout left at "Unknown" (auto-detect).
void mirror_factory::get_defaults2(obs_data_t* data)
{
	obs_data_set_default_string(data, ST_KEY_SOURCE, "");
	obs_data_set_default_bool(data, ST_KEY_SOURCE_AUDIO, false);
	obs_data_set_default_int(data, ST_KEY_SOURCE_AUDIO_LAYOUT, static_cast<int64_t>(SPEAKERS_UNKNOWN));
}
// Property-change callback: reveal the speaker-layout selector only while
// the audio checkbox is enabled. Returns true when the UI needs a refresh;
// noexcept because OBS calls this across the C boundary.
static bool modified_properties(obs_properties_t* pr, obs_property_t* p, obs_data_t* data) noexcept
{
	try {
		if (p != obs_properties_get(pr, ST_KEY_SOURCE_AUDIO)) {
			return false;
		}
		const bool audio_enabled = obs_data_get_bool(data, ST_KEY_SOURCE_AUDIO);
		obs_property_set_visible(obs_properties_get(pr, ST_KEY_SOURCE_AUDIO_LAYOUT), audio_enabled);
		return true;
	} catch (...) {
		return false;
	}
}
// Build the configuration UI: manual button, source picker, audio toggle,
// and the speaker-layout override list.
obs_properties_t* mirror_factory::get_properties2(mirror_instance* data)
{
	obs_properties_t* props = obs_properties_create();
	obs_property_t*   prop  = nullptr;

	// Button linking to the online manual.
	obs_properties_add_button2(props, S_MANUAL_OPEN, D_TRANSLATE(S_MANUAL_OPEN), streamfx::source::mirror::mirror_factory::on_manual_open, nullptr);

	{ // Source picker: empty entry first, then all sources, then all scenes.
		prop = obs_properties_add_list(props, ST_KEY_SOURCE, D_TRANSLATE(ST_I18N_SOURCE), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
		obs_property_set_modified_callback(prop, modified_properties);
		obs_property_list_add_string(prop, "", "");

		// Adds one "<name> (<type>)" entry to the list.
		auto append_entry = [&prop](const std::string& name, const char* type_label) {
			std::stringstream label;
			label << name << " (" << type_label << ")";
			obs_property_list_add_string(prop, label.str().c_str(), name.c_str());
		};
		obs::source_tracker::instance()->enumerate(
			[&append_entry](std::string name, ::streamfx::obs::source) {
				append_entry(name, D_TRANSLATE(S_SOURCETYPE_SOURCE));
				return false;
			},
			obs::source_tracker::filter_sources);
		obs::source_tracker::instance()->enumerate(
			[&append_entry](std::string name, ::streamfx::obs::source) {
				append_entry(name, D_TRANSLATE(S_SOURCETYPE_SCENE));
				return false;
			},
			obs::source_tracker::filter_scenes);
	}

	{ // Audio mirroring toggle; its callback reveals the layout selector.
		prop = obs_properties_add_bool(props, ST_KEY_SOURCE_AUDIO, D_TRANSLATE(ST_I18N_SOURCE_AUDIO));
		obs_property_set_modified_callback(prop, modified_properties);
	}

	{ // Speaker-layout override; "Unknown" means auto-detect from planes.
		prop = obs_properties_add_list(props, ST_KEY_SOURCE_AUDIO_LAYOUT, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT), OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_INT);
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(Unknown)), static_cast<int64_t>(SPEAKERS_UNKNOWN));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(Mono)), static_cast<int64_t>(SPEAKERS_MONO));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(Stereo)), static_cast<int64_t>(SPEAKERS_STEREO));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(StereoLFE)), static_cast<int64_t>(SPEAKERS_2POINT1));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(Quadraphonic)), static_cast<int64_t>(SPEAKERS_4POINT0));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(QuadraphonicLFE)), static_cast<int64_t>(SPEAKERS_4POINT1));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(Surround)), static_cast<int64_t>(SPEAKERS_5POINT1));
		obs_property_list_add_int(prop, D_TRANSLATE(ST_I18N_SOURCE_AUDIO_LAYOUT_(FullSurround)), static_cast<int64_t>(SPEAKERS_7POINT1));
	}

	return props;
}
// "Open Manual" button callback. Errors are logged, never propagated; the
// button itself never requires a property refresh, so all paths return false.
bool mirror_factory::on_manual_open(obs_properties_t* props, obs_property_t* property, void* data)
{
	try {
		streamfx::open_url(HELP_URL);
	} catch (const std::exception& ex) {
		D_LOG_ERROR("Failed to open manual due to error: %s", ex.what());
	} catch (...) {
		D_LOG_ERROR("Failed to open manual due to unknown error.", "");
	}
	return false;
}
std::shared_ptr<mirror_factory> mirror_factory::instance()
{
static std::weak_ptr<mirror_factory> winst;
static std::mutex mtx;
std::unique_lock<decltype(mtx)> lock(mtx);
auto instance = winst.lock();
if (!instance) {
instance = std::shared_ptr<mirror_factory>(new mirror_factory());
winst = instance;
}
return instance;
}
// Keeps the factory alive for the plugin's lifetime (released in the
// component finalizer below).
static std::shared_ptr<mirror_factory> loader_instance;

// Component registration: created after (and torn down before) the source
// tracker, which get_properties2() depends on.
static auto loader = streamfx::component(
	"source_mirror",
	[]() { // Initializer
		loader_instance = mirror_factory::instance();
	},
	[]() { // Finalizer
		loader_instance.reset();
	},
	{"core::source_tracker"});

View File

@ -0,0 +1,93 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "common.hpp"
#include "gfx/gfx-source-texture.hpp"
#include "obs/gs/gs-texrender.hpp"
#include "obs/gs/gs-sampler.hpp"
#include "obs/obs-signal-handler.hpp"
#include "obs/obs-source-active-child.hpp"
#include "obs/obs-source-factory.hpp"
#include "obs/obs-source.hpp"
#include "obs/obs-tools.hpp"
#include "warning-disable.hpp"
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
#include "warning-enable.hpp"
namespace streamfx::source::mirror {
// One queued audio packet. 'osa' is what gets handed to
// obs_source_output_audio(); 'data' presumably holds per-plane copies of the
// samples so the packet outlives the signal callback (constructor body not
// visible here — confirm it deep-copies).
struct mirror_audio_data {
	mirror_audio_data(const audio_data*, speaker_layout);

	obs_source_audio                  osa;
	std::vector<std::vector<uint8_t>> data; // One buffer per audio plane.
};
// Renders (and optionally re-emits the audio of) another OBS source or
// scene, selected by name in the settings.
class mirror_instance : public obs::source_instance {
	// Source
	::streamfx::obs::source                               _source;       // The mirrored source.
	std::shared_ptr<::streamfx::obs::source_active_child> _source_child; // Active-child registration with OBS.
	std::shared_ptr<obs::source_signal_handler>           _signal_rename;
	std::shared_ptr<obs::audio_signal_handler>            _signal_audio;
	std::pair<uint32_t, uint32_t>                         _source_size;  // Cached width/height of the mirrored source.

	// Audio
	bool                          _audio_enabled; // Forward the source's audio?
	speaker_layout                _audio_layout;  // User override; SPEAKERS_UNKNOWN = auto-detect.
	std::mutex                    _audio_queue_lock;
	std::queue<mirror_audio_data> _audio_queue;   // Packets pending output on the thread pool.

	public:
	mirror_instance(obs_data_t* settings, obs_source_t* self);
	virtual ~mirror_instance();

	virtual uint32_t get_width() override;
	virtual uint32_t get_height() override;

	virtual void load(obs_data_t*) override;
	virtual void migrate(obs_data_t*, uint64_t) override;
	virtual void update(obs_data_t*) override;
	virtual void save(obs_data_t*) override;

	virtual void video_tick(float) override;
	virtual void video_render(gs_effect_t*) override;

	virtual void enum_active_sources(obs_source_enum_proc_t, void*) override;
	virtual void enum_all_sources(obs_source_enum_proc_t, void*) override;

	private:
	// Resolve and hook the named source; release() undoes this.
	void acquire(std::string source_name);
	void release();

	// Audio signal callback and its deferred thread-pool counterpart.
	void on_audio(::streamfx::obs::source, const struct audio_data*, bool);
	void audio_output(std::shared_ptr<void> data);
};
// Factory registering the "Source Mirror" source type with OBS.
class mirror_factory : public obs::source_factory<source::mirror::mirror_factory, source::mirror::mirror_instance> {
	public:
	mirror_factory();
	virtual ~mirror_factory() override;

	virtual const char* get_name() override;

	virtual void get_defaults2(obs_data_t* data) override;

	virtual obs_properties_t* get_properties2(source::mirror::mirror_instance* data) override;

	// UI button callback: opens the online manual.
	static bool on_manual_open(obs_properties_t* props, obs_property_t* property, void* data);

	public: // Singleton
	static void initialize();
	static void finalize();

	static std::shared_ptr<mirror_factory> instance();
};
} // namespace streamfx::source::mirror

View File

@ -0,0 +1,48 @@
# AUTOGENERATED COPYRIGHT HEADER START
# Copyright (C) 2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
# AUTOGENERATED COPYRIGHT HEADER END
cmake_minimum_required(VERSION 3.26)
project("NVIDIA")
list(APPEND CMAKE_MESSAGE_INDENT "[${PROJECT_NAME}] ")

#- NVIDIA Audio Effects SDK
# Header-only imported target for the bundled Maxine AFX SDK headers.
if(NOT TARGET NVIDIA::AFX)
	add_library(NVIDIA::AFX IMPORTED INTERFACE)
	target_include_directories(NVIDIA::AFX
		INTERFACE
			"${StreamFX_SOURCE_DIR}/third-party/nvidia-maxine-afx-sdk/nvafx/include/"
	)
endif()

#- NVIDIA Augmented Reality SDK
# Header-only imported target for the bundled Maxine AR SDK headers.
if(NOT TARGET NVIDIA::AR)
	add_library(NVIDIA::AR IMPORTED INTERFACE)
	target_include_directories(NVIDIA::AR
		INTERFACE
			"${StreamFX_SOURCE_DIR}/third-party/nvidia-maxine-ar-sdk/nvar/include/"
			"${StreamFX_SOURCE_DIR}/third-party/nvidia-maxine-ar-sdk/nvar/src/"
	)
endif()

#- NVIDIA Video Effects SDK
# Header-only imported target for the bundled Maxine VFX SDK headers.
if(NOT TARGET NVIDIA::VFX)
	add_library(NVIDIA::VFX IMPORTED INTERFACE)
	target_include_directories(NVIDIA::VFX
		INTERFACE
			"${StreamFX_SOURCE_DIR}/third-party/nvidia-maxine-vfx-sdk/nvvfx/include/"
			"${StreamFX_SOURCE_DIR}/third-party/nvidia-maxine-vfx-sdk/nvvfx/src/"
	)
endif()

# Register the component and wire up the three SDK header targets.
streamfx_add_component("NVIDIA")

target_link_libraries(${COMPONENT_TARGET}
	PRIVATE
		NVIDIA::AFX
		NVIDIA::AR
		NVIDIA::VFX
)

# The SDKs only ship Windows/Direct3D11 binaries, so disable elsewhere.
if(NOT D_PLATFORM_WINDOWS)
	streamfx_disable_component("NVIDIA" REASON "NVIDIA integration is (currently) only available for Windows under Direct3D11.")
endif()

View File

@ -0,0 +1,54 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-ar-feature.hpp"
#include "nvidia/cuda/nvidia-cuda-gs-texture.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "obs/gs/gs-texture.hpp"
namespace streamfx::nvidia::ar {
// Face detection built on the NVIDIA AR SDK's feature API. Wraps input
// conversion, the bounding-box output buffers, and confidence values.
class facedetection : public feature {
	std::shared_ptr<::streamfx::nvidia::cv::texture> _input;  // GPU-side input texture handed to the SDK.
	std::shared_ptr<::streamfx::nvidia::cv::image>   _source; // SDK-format source image.
	std::shared_ptr<::streamfx::nvidia::cv::image>   _tmp;    // Intermediate conversion buffer.
	std::vector<rect_t>                              _rects;            // Detected face rectangles.
	std::vector<float>                               _rects_confidence; // Per-rectangle confidence.
	bounds_t                                         _bboxes; // SDK-facing view over _rects.
	bool                                             _dirty;  // Results stale until next process()? TODO confirm.

	public:
	~facedetection();

	/** Create a new face detection feature.
	 *
	 * Must be in a graphics and CUDA context when calling.
	 */
	facedetection();

	// Allowed range and current value for the maximum tracked face count.
	std::pair<size_t, size_t> tracking_limit_range();
	size_t                    tracking_limit();
	void                      set_tracking_limit(size_t v);

	// Run detection on the given texture; results are read via count()/at().
	void process(std::shared_ptr<::streamfx::obs::gs::texture> in);

	size_t count();

	rect_t const& at(size_t index);
	rect_t const& at(size_t index, float& confidence);

	private:
	void resize(uint32_t width, uint32_t height);
	void load();
};
} // namespace streamfx::nvidia::ar

View File

@ -0,0 +1,207 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia/ar/nvidia-ar.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "nvidia/cv/nvidia-cv.hpp"
#include "warning-disable.hpp"
#include <string_view>
#include "warning-enable.hpp"
namespace streamfx::nvidia::ar {
// Typed wrapper around an NVIDIA AR SDK feature handle. The SDK exposes a
// stringly-typed parameter system (NvAR_Set*/NvAR_Get* keyed by parameter
// name); this class pairs the owning handle (_fx) with typed accessors.
class feature {
	protected:
	std::shared_ptr<::streamfx::nvidia::cuda::obs> _nvcuda; // Keeps the CUDA wrapper alive.
	std::shared_ptr<::streamfx::nvidia::cv::cv>    _nvcv;   // Keeps the CV wrapper alive.
	std::shared_ptr<::streamfx::nvidia::ar::ar>    _nvar;   // AR SDK entry points.
	std::shared_ptr<void>                          _fx;     // Owning handle to the SDK feature.
	std::u8string                                  _model_path;

	public:
	~feature();
	feature(feature_t feature);

	// Raw SDK handle, for calls not covered by the typed accessors.
	::streamfx::nvidia::ar::handle_t get()
	{
		return _fx.get();
	}

	public /* Int32 */:
	inline cv::result set_uint32(parameter_t param, uint32_t const value)
	{
		return _nvar->NvAR_SetU32(_fx.get(), param, value);
	}
	inline cv::result get_uint32(parameter_t param, uint32_t* value)
	{
		return _nvar->NvAR_GetU32(_fx.get(), param, value);
	}
	inline cv::result set_int32(parameter_t param, int32_t const value)
	{
		return _nvar->NvAR_SetS32(_fx.get(), param, value);
	}
	inline cv::result get_int32(parameter_t param, int32_t* value)
	{
		return _nvar->NvAR_GetS32(_fx.get(), param, value);
	}

	public /* Int64 */:
	inline cv::result set_uint64(parameter_t param, uint64_t const value)
	{
		return _nvar->NvAR_SetU64(_fx.get(), param, value);
	}
	inline cv::result get_uint64(parameter_t param, uint64_t* value)
	{
		return _nvar->NvAR_GetU64(_fx.get(), param, value);
	}

	public /* Float32 */:
	inline cv::result set_float32(parameter_t param, float const value)
	{
		return _nvar->NvAR_SetF32(_fx.get(), param, value);
	}
	inline cv::result get_float32(parameter_t param, float* value)
	{
		return _nvar->NvAR_GetF32(_fx.get(), param, value);
	}
	inline cv::result set_float32array(parameter_t param, float* const value, int32_t size)
	{
		return _nvar->NvAR_SetF32Array(_fx.get(), param, value, static_cast<int32_t>(size));
	}
	// NOTE(review): 'value' and 'size' are passed by value, so the SDK writes
	// into local copies that are discarded — callers only receive the result
	// code. Likely intended as 'const float*&' / 'int32_t&'; confirm callers,
	// or prefer the std::vector overload below which handles this correctly.
	inline cv::result get_float32array(parameter_t param, const float* value, int32_t size)
	{
		return _nvar->NvAR_GetF32Array(_fx.get(), param, &value, &size);
	}
	inline cv::result set_float32array(parameter_t param, std::vector<float> const& value)
	{
		return _nvar->NvAR_SetF32Array(_fx.get(), param, value.data(), static_cast<int32_t>(value.size()));
	}
	// Copies the SDK-owned array into 'value'. Note: the copy happens even if
	// the SDK call failed; 'data'/'size' would then be uninitialized — the
	// result code should be checked before trusting 'value'.
	inline cv::result get_float32array(parameter_t param, std::vector<float>& value)
	{
		const float* data;
		int32_t      size;
		cv::result   result;
		result = _nvar->NvAR_GetF32Array(_fx.get(), param, &data, &size);
		value.resize(static_cast<size_t>(size));
		memcpy(value.data(), data, size * sizeof(float));
		return result;
	}

	public /* Float64 */:
	inline cv::result set_float64(parameter_t param, double const value)
	{
		return _nvar->NvAR_SetF64(_fx.get(), param, value);
	}
	inline cv::result get_float64(parameter_t param, double* value)
	{
		return _nvar->NvAR_GetF64(_fx.get(), param, value);
	}

	public /* String */:
	inline cv::result set_string(parameter_t param, const char* const value)
	{
		return _nvar->NvAR_SetString(_fx.get(), param, value);
	};
	inline cv::result get_string(parameter_t param, const char*& value)
	{
		return _nvar->NvAR_GetString(_fx.get(), param, &value);
	};
	inline cv::result set_string(parameter_t param, const char8_t* const value)
	{
		return _nvar->NvAR_SetString(_fx.get(), param, reinterpret_cast<const char*>(value));
	};
	inline cv::result get_string(parameter_t param, const char8_t*& value)
	{
		return _nvar->NvAR_GetString(_fx.get(), param, reinterpret_cast<const char**>(&value));
	};
	inline cv::result set_string(parameter_t param, std::string_view const value)
	{
		return _nvar->NvAR_SetString(_fx.get(), param, value.data());
	};
	cv::result get(parameter_t param, std::string_view& value);
	inline cv::result set_string(parameter_t param, std::string const& value)
	{
		return _nvar->NvAR_SetString(_fx.get(), param, value.c_str());
	};
	cv::result get(parameter_t param, std::string& value);
	inline cv::result set_string(parameter_t param, std::u8string const& value)
	{
		return _nvar->NvAR_SetString(_fx.get(), param, reinterpret_cast<const char*>(value.c_str()));
	};
	cv::result get(parameter_t param, std::u8string& value);

	public /* CUDA Stream */:
	inline cv::result set_cuda_stream(parameter_t param, cuda::stream_t const value)
	{
		return _nvar->NvAR_SetCudaStream(_fx.get(), param, value);
	};
	inline cv::result get_cuda_stream(parameter_t param, cuda::stream_t& value)
	{
		return _nvar->NvAR_GetCudaStream(_fx.get(), param, &value);
	};
	inline cv::result set_cuda_stream(parameter_t param, std::shared_ptr<::streamfx::nvidia::cuda::stream> const value)
	{
		return _nvar->NvAR_SetCudaStream(_fx.get(), param, value->get());
	}
	//inline cv::result get(parameter_t param, std::shared_ptr<::streamfx::nvidia::cuda::stream> value);

	public /* CV Image */:
	inline cv::result set_image(parameter_t param, cv::image_t& value)
	{
		return _nvar->NvAR_SetObject(_fx.get(), param, &value, sizeof(cv::image_t));
	};
	inline cv::result get_image(parameter_t param, cv::image_t*& value)
	{
		return _nvar->NvAR_GetObject(_fx.get(), param, reinterpret_cast<object_t*>(&value), sizeof(cv::image_t));
	};
	inline cv::result set_image(parameter_t param, std::shared_ptr<cv::image> const value)
	{
		return _nvar->NvAR_SetObject(_fx.get(), param, value->get_image(), sizeof(cv::image_t));
	};
	//inline cv::result get(parameter_t param, std::shared_ptr<cv::image>& value);

	public /* CV Texture */:
	inline cv::result set_image(parameter_t param, std::shared_ptr<cv::texture> const value)
	{
		return _nvar->NvAR_SetObject(_fx.get(), param, value->get_image(), sizeof(cv::image_t));
	};
	//inline cv::result get(parameter_t param, std::shared_ptr<cv::image>& value);

	public /* Objects */:
	inline cv::result set_object(parameter_t param, void* const data, size_t size)
	{
		return _nvar->NvAR_SetObject(_fx.get(), param, data, static_cast<uint32_t>(size));
	}
	inline cv::result get_object(parameter_t param, void*& data, size_t size)
	{
		return _nvar->NvAR_GetObject(_fx.get(), param, &data, static_cast<uint32_t>(size));
	}

	public /* Control */:
	// Loads the feature's model; must be called before the first run().
	inline cv::result load()
	{
		return _nvar->NvAR_Load(_fx.get());
	}
	// Executes the feature once with the currently-set parameters.
	inline cv::result run()
	{
		return _nvar->NvAR_Run(_fx.get());
	}
};
} // namespace streamfx::nvidia::ar

View File

@ -0,0 +1,165 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia/cv/nvidia-cv.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
#include "warning-enable.hpp"
#define P_NVAR_DEFINE_FUNCTION(name, ...) \
private: \
typedef ::streamfx::nvidia::cv::result (*t##name)(__VA_ARGS__); \
\
public: \
t##name name = nullptr;
#define P_NVAR_INPUT "NvAR_Parameter_Input_"
#define P_NVAR_OUTPUT "NvAR_Parameter_Output_"
#define P_NVAR_CONFIG "NvAR_Parameter_Config_"
/*
* Config Parameters:
* P_NVAR_CONFIG "BatchSize"
* P_NVAR_CONFIG "UseCudaGraph"
* P_NVAR_CONFIG "CUDAStream"
* P_NVAR_CONFIG "ExpressionCount"
* P_NVAR_CONFIG "FeatureDescription"
* P_NVAR_CONFIG "FocalLength"
* P_NVAR_CONFIG "GPU"
* P_NVAR_CONFIG "Landmarks_Size"
* P_NVAR_CONFIG "LandmarksConfidence_Size"
* P_NVAR_CONFIG "Mode"
* P_NVAR_CONFIG "TRTModelDir"
* P_NVAR_CONFIG "ModelDir"
* P_NVAR_CONFIG "ModelName"
* P_NVAR_CONFIG "NumKeyPoints"
* P_NVAR_CONFIG "ReferencePose"
* P_NVAR_CONFIG "ShapeEigenValueCount"
* P_NVAR_CONFIG "Temporal"
* P_NVAR_CONFIG "TriangleCount"
* P_NVAR_CONFIG "VertexCount"
*
* Input Parameters:
* P_NVAR_INPUT "Image"
* P_NVAR_INPUT "Width"
* P_NVAR_INPUT "Height"
* P_NVAR_INPUT "BoundingBoxes"
* P_NVAR_INPUT "BoundingBoxesConfidence"
* P_NVAR_INPUT "Landmarks"
*
* Output Parameters
* P_NVAR_OUTPUT "BoundingBoxes"
* P_NVAR_OUTPUT "BoundingBoxesConfidence"
* P_NVAR_OUTPUT "ExpressionCoefficients"
* P_NVAR_OUTPUT "FaceMesh"
* P_NVAR_OUTPUT "JointAngles"
* P_NVAR_OUTPUT "KeyPoints"
* P_NVAR_OUTPUT "KeyPoints3D"
* P_NVAR_OUTPUT "KeyPointsConfidence"
* P_NVAR_OUTPUT "Landmarks"
* P_NVAR_OUTPUT "LandmarksConfidence"
* P_NVAR_OUTPUT "Pose"
* P_NVAR_OUTPUT "RenderingParams"
* P_NVAR_OUTPUT "ShapeEigenValues"
*/
namespace streamfx::nvidia::ar {
	// SDK identifiers are plain C strings; handles are opaque pointers.
	typedef const char* feature_t;
	typedef const char* parameter_t;
	typedef void*       object_t;
	typedef void*       handle_t;

	// Feature names accepted by NvAR_Create.
	static constexpr feature_t FEATURE_BODY_DETECTION        = "BodyDetection";
	static constexpr feature_t FEATURE_BODY_POSE_ESTIMATION  = "BodyPoseEstimation";
	static constexpr feature_t FEATURE_FACE_DETECTION        = "FaceDetection";
	static constexpr feature_t FEATURE_FACE_BOX_DETECTION    = "FaceBoxDetection";
	static constexpr feature_t FEATURE_FACE_RECONSTRUCTION   = "Face3DReconstruction";
	static constexpr feature_t FEATURE_LANDMARK_DETECTION    = "LandMarkDetection";

	// Minimal POD vector types matching the SDK's memory layout.
	template<typename T>
	struct vec2 {
		T x;
		T y;
	};
	template<typename T>
	struct vec3 : public vec2<T> {
		T z;
	};
	template<typename T>
	struct vec4 : public vec3<T> {
		T w;
	};

	typedef vec2<float> point_t;
	typedef vec4<float> frustum_t;
	typedef vec4<float> quaternion_t;
	typedef vec4<float> rect_t;

	// Caller-owned rectangle buffer the SDK fills: 'current' of 'maximum'
	// entries in 'rects' are valid.
	struct bounds_t {
		rect_t* rects;
		uint8_t current;
		uint8_t maximum;
	};

	// NOTE(review): 'indices' is a single vec3 by value while 'num_indices'
	// suggests an array — likely should be 'vec3<uint16_t>*'. Confirm against
	// the NvAR face-mesh output layout before using.
	struct face_mesh_t {
		vec3<float>*   vertices;
		size_t         num_vertices;
		vec3<uint16_t> indices;
		size_t         num_indices;
	};

	struct rendering_params_t {
		frustum_t    frustum;
		quaternion_t rotation;
		vec3<float>  translation;
	};

	// Dynamic loader for the AR SDK library; function pointers are resolved
	// at construction and exposed via the P_NVAR_DEFINE_FUNCTION members.
	class ar {
		std::shared_ptr<::streamfx::util::library> _library;
		std::filesystem::path                      _model_path;
#ifdef WIN32
		void* _extra;
#endif

		public:
		~ar();
		ar();

		// Directory containing the SDK's model files.
		std::filesystem::path const& get_model_path();

		public:
		P_NVAR_DEFINE_FUNCTION(NvAR_GetVersion, uint32_t* version);
		P_NVAR_DEFINE_FUNCTION(NvAR_Create, feature_t feature_id, handle_t* ptr);
		P_NVAR_DEFINE_FUNCTION(NvAR_Destroy, handle_t ptr);
		P_NVAR_DEFINE_FUNCTION(NvAR_Run, handle_t ptr);
		P_NVAR_DEFINE_FUNCTION(NvAR_Load, handle_t ptr);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetS32, handle_t ptr, parameter_t parameter, int32_t* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetS32, handle_t ptr, parameter_t parameter, int32_t value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetU32, handle_t ptr, parameter_t parameter, uint32_t* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetU32, handle_t ptr, parameter_t parameter, uint32_t value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetU64, handle_t ptr, parameter_t parameter, uint64_t* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetU64, handle_t ptr, parameter_t parameter, uint64_t value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetF32, handle_t ptr, parameter_t parameter, float* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetF32, handle_t ptr, parameter_t parameter, float value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetF64, handle_t ptr, parameter_t parameter, double* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetF64, handle_t ptr, parameter_t parameter, double value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetString, handle_t ptr, parameter_t parameter, const char** value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetString, handle_t ptr, parameter_t parameter, const char* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetCudaStream, handle_t ptr, parameter_t parameter, ::streamfx::nvidia::cuda::stream_t* value);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetCudaStream, handle_t ptr, parameter_t parameter, ::streamfx::nvidia::cuda::stream_t value);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetObject, handle_t ptr, parameter_t parameter, object_t* value, uint32_t size);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetObject, handle_t ptr, parameter_t parameter, object_t value, uint32_t size);
		P_NVAR_DEFINE_FUNCTION(NvAR_GetF32Array, handle_t ptr, parameter_t parameter, const float** values, int32_t* size);
		P_NVAR_DEFINE_FUNCTION(NvAR_SetF32Array, handle_t ptr, parameter_t parameter, const float* values, int32_t size);

		public:
		// Shared loader instance; the library is loaded on first use.
		static std::shared_ptr<::streamfx::nvidia::ar::ar> get();
	};
} // namespace streamfx::nvidia::ar

View File

@ -0,0 +1,57 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-cuda.hpp"
#include "warning-disable.hpp"
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cuda {
	class context_stack;

	// RAII owner of a CUDA context (CUcontext). Use enter() to obtain a
	// scope guard that pushes the context for the current thread.
	class context : public std::enable_shared_from_this<::streamfx::nvidia::cuda::context> {
		std::shared_ptr<::streamfx::nvidia::cuda::cuda> _cuda;
		::streamfx::nvidia::cuda::context_t             _ctx;
		bool                                            _has_device; // True when created from a device (see D3D11 ctor).
		::streamfx::nvidia::cuda::device_t              _device;

		public:
		~context();

		private:
		context();

		public:
#ifdef WIN32
		// Create a context on the CUDA device backing the given D3D11 device.
		context(ID3D11Device* device);
#endif

		::streamfx::nvidia::cuda::context_t get();

		// Push/pop this context on the calling thread's context stack.
		void push();
		void pop();

		void synchronize();

		public:
		// Scope guard: pushes now, pops when the returned object dies.
		std::shared_ptr<::streamfx::nvidia::cuda::context_stack> enter();
	};

	// RAII helper that keeps a context current for its own lifetime.
	class context_stack {
		std::shared_ptr<::streamfx::nvidia::cuda::context> _ctx;

		public:
		inline ~context_stack()
		{
			_ctx->pop();
		}

		inline context_stack(std::shared_ptr<::streamfx::nvidia::cuda::context> ctx) : _ctx(std::move(ctx))
		{
			_ctx->push();
		}
	};
} // namespace streamfx::nvidia::cuda

View File

@ -0,0 +1,35 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-cuda-stream.hpp"
#include "nvidia-cuda.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <cstddef>
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cuda {
	// CUDA graphics-interop wrapper for an OBS graphics texture: registers
	// the texture as a CUDA graphics resource and exposes map()/unmap().
	class gstexture {
		std::shared_ptr<::streamfx::nvidia::cuda::cuda> _cuda;
		std::shared_ptr<streamfx::obs::gs::texture>     _texture;  // The wrapped OBS texture (kept alive).
		graphics_resource_t                             _resource; // CUDA-side registration handle.

		bool                                         _is_mapped;
		array_t                                      _pointer; // Valid only while mapped.
		std::shared_ptr<streamfx::nvidia::cuda::stream> _stream; // Stream used for the active mapping.

		public:
		~gstexture();
		gstexture(std::shared_ptr<streamfx::obs::gs::texture> texture);

		// Map the texture for CUDA access on 'stream'; returns the CUDA array.
		array_t map(std::shared_ptr<streamfx::nvidia::cuda::stream> stream);
		void    unmap();

		std::shared_ptr<streamfx::obs::gs::texture>    get_texture();
		::streamfx::nvidia::cuda::graphics_resource_t get();
	};
} // namespace streamfx::nvidia::cuda

View File

@ -0,0 +1,27 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-cuda.hpp"
#include "warning-disable.hpp"
#include <cstddef>
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cuda {
	// RAII owner of a CUDA device allocation of a fixed byte size.
	class memory {
		std::shared_ptr<::streamfx::nvidia::cuda::cuda> _cuda;
		device_ptr_t                                    _pointer;
		size_t                                          _size;

		public:
		~memory();
		memory(size_t size);

		// Device pointer and allocation size in bytes.
		device_ptr_t get();
		std::size_t  size();
	};
} // namespace streamfx::nvidia::cuda

View File

@ -0,0 +1,31 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-cuda-context.hpp"
#include "nvidia-cuda-stream.hpp"
#include "nvidia-cuda.hpp"
#include "warning-disable.hpp"
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cuda {
	// Bundles the CUDA wrapper, the context used by OBS rendering, and a
	// shared stream; obtained via the get() singleton accessor.
	class obs {
		std::shared_ptr<::streamfx::nvidia::cuda::cuda>    _cuda;
		std::shared_ptr<::streamfx::nvidia::cuda::context> _context;
		std::shared_ptr<::streamfx::nvidia::cuda::stream>  _stream;

		public:
		~obs();
		obs();

		std::shared_ptr<::streamfx::nvidia::cuda::cuda>    get_cuda();
		std::shared_ptr<::streamfx::nvidia::cuda::context> get_context();
		std::shared_ptr<::streamfx::nvidia::cuda::stream>  get_stream();

		public:
		// Shared instance accessor.
		static std::shared_ptr<::streamfx::nvidia::cuda::obs> get();
	};
} // namespace streamfx::nvidia::cuda

View File

@ -0,0 +1,25 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2017-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-cuda.hpp"
#include "warning-disable.hpp"
#include <memory>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cuda {
	// RAII owner of a CUDA stream (CUstream), optionally non-blocking and
	// with a scheduling priority.
	class stream {
		std::shared_ptr<::streamfx::nvidia::cuda::cuda> _cuda;
		::streamfx::nvidia::cuda::stream_t              _stream;

		public:
		~stream();
		stream(::streamfx::nvidia::cuda::stream_flags flags = ::streamfx::nvidia::cuda::stream_flags::DEFAULT, int32_t priority = 0);

		::streamfx::nvidia::cuda::stream_t get();

		// Block until all work queued on this stream has completed.
		void synchronize();
	};
} // namespace streamfx::nvidia::cuda

View File

@ -0,0 +1,339 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "util/util-bitmask.hpp"
#include "util/util-library.hpp"
#include "warning-disable.hpp"
#include <cstddef>
#include <tuple>
#include "warning-enable.hpp"
#ifdef WIN32
#include "warning-disable.hpp"
#include <d3d11.h>
#include <dxgi.h>
#include "warning-enable.hpp"
#endif
#define P_CUDA_DEFINE_FUNCTION(name, ...) \
private: \
typedef ::streamfx::nvidia::cuda::result (*t##name)(__VA_ARGS__); \
\
public: \
t##name name = nullptr;
namespace streamfx::nvidia::cuda {
// Subset of CUDA driver result codes (CUresult) used by the wrapper;
// numeric values match the driver API.
enum class result : std::size_t {
	SUCCESS                  = 0,
	INVALID_VALUE            = 1,
	OUT_OF_MEMORY            = 2,
	NOT_INITIALIZED          = 3,
	DEINITIALIZED            = 4,
	NO_DEVICE                = 100,
	INVALID_DEVICE           = 101,
	INVALID_CONTEXT          = 201,
	MAP_FAILED               = 205,
	UNMAP_FAILED             = 206,
	ARRAY_IS_MAPPED          = 207,
	ALREADY_MAPPED           = 208,
	NOT_MAPPED               = 211,
	INVALID_GRAPHICS_CONTEXT = 219,
	// Still missing some.
};
// Where a buffer lives, mirroring CUmemorytype.
enum class memory_type : uint32_t {
	HOST    = 1,
	DEVICE  = 2,
	ARRAY   = 3,
	UNIFIED = 4,
};

// Element formats for CUDA arrays, mirroring CUarray_format.
enum class array_format : uint32_t {
	UNSIGNED_INT8  = 0b00000001,
	UNSIGNED_INT16 = 0b00000010,
	UNSIGNED_INT32 = 0b00000011,
	SIGNED_INT8    = 0b00001000,
	SIGNED_INT16   = 0b00001001,
	SIGNED_INT32   = 0b00001010,
	HALF           = 0b00010000,
	FLOAT          = 0b00100000,
};

// Context creation flags, mirroring CUctx_flags (combinable bitmask).
enum class context_flags : uint32_t {
	SCHEDULER_AUTO                 = 0x0,
	SCHEDULER_SPIN                 = 0x1,
	SCHEDULER_YIELD                = 0x2,
	SCHEDULER_BLOCKING_SYNC        = 0x4,
	MAP_HOST                       = 0x8,
	LOCAL_MEMORY_RESIZE_TO_MAXIMUM = 0x10,
};

// External-memory import handle kinds, mirroring CUexternalMemoryHandleType.
enum class external_memory_handle_type : uint32_t {
	INVALID                      = 0,
	FILE_DESCRIPTOR              = 1,
	WIN32_SHARED_HANDLE          = 2,
	WIN32_GLOBAL_SHARED_HANDLE   = 3,
	D3D12_HEAP                   = 4,
	D3D12_RESOURCE               = 5,
	D3D11_SHARED_RESOURCE        = 6,
	D3D11_GLOBAL_SHARED_RESOURCE = 7,
	NVSCIBUF                     = 8,
};

// Stream creation flags, mirroring CUstream_flags.
enum class stream_flags : uint32_t {
	DEFAULT      = 0x0,
	NON_BLOCKING = 0x1,
};

// Opaque driver-API handle aliases (CUarray, CUcontext, CUdeviceptr, ...).
typedef void*    array_t;
typedef void*    context_t;
typedef uint64_t device_ptr_t;
typedef void*    external_memory_t;
typedef void*    graphics_resource_t;
typedef void*    stream_t;
typedef int32_t  device_t;
// 2D copy descriptor mirroring CUDA_MEMCPY2D: exactly one of host/device/
// array is used on each side, selected by the corresponding memory_type.
struct memcpy2d_v2_t {
	// Source.
	std::size_t  src_x_in_bytes;
	std::size_t  src_y;
	memory_type  src_memory_type;
	const void*  src_host;
	device_ptr_t src_device;
	array_t      src_array;
	std::size_t  src_pitch; // Row stride in bytes (ignored for arrays).

	// Destination.
	std::size_t  dst_x_in_bytes;
	std::size_t  dst_y;
	memory_type  dst_memory_type;
	const void*  dst_host;
	device_ptr_t dst_device;
	array_t      dst_array;
	std::size_t  dst_pitch; // Row stride in bytes (ignored for arrays).

	// Copy extent.
	std::size_t width_in_bytes;
	std::size_t height;
};

// Array description mirroring CUDA_ARRAY_DESCRIPTOR.
struct array_descriptor_v2_t {
	std::size_t  width;
	std::size_t  height;
	uint32_t     num_channels;
	array_format format;
};
// Mirrors CUDA_EXTERNAL_MEMORY_BUFFER_DESC.
struct external_memory_buffer_info_v1_t {
	uint64_t offset;
	uint64_t size;
	uint32_t flags;
	uint32_t reserved[16];
};

// Mirrors CUDA_EXTERNAL_MEMORY_HANDLE_DESC; which union member is valid
// depends on 'type'.
struct external_memory_handle_info_v1_t {
	external_memory_handle_type type;
	union {
		int32_t file; // FILE_DESCRIPTOR
		struct {      // Win32 handle kinds
			void*       handle;
			const void* name;
		};
		const void* nvscibuf; // NVSCIBUF
	};
	uint64_t size;
	uint32_t flags;
	uint32_t reserved[16];
};

// 16-byte identifier matching CUuuid's layout.
struct uuid_t {
	union {
		char bytes[16];
		struct {
			uint32_t a;
			uint16_t b;
			uint16_t c;
			uint16_t d;
			uint16_t e;
			uint32_t f;
		} uuid;
	};
};

// 8-byte locally-unique identifier (Windows LUID layout).
struct luid_t {
	union {
		char bytes[8];
		struct {
			uint32_t low;
			int32_t  high;
		} parts;
		uint64_t luid;
	};
};
// Exception carrying a CUDA driver result code.
// NOTE(review): does not override std::exception::what(), so callers catching
// std::exception see the generic message; consider adding an override.
class cuda_error : public std::exception {
	::streamfx::nvidia::cuda::result _code;

	public:
	~cuda_error(){};
	cuda_error(::streamfx::nvidia::cuda::result code) : _code(code) {}

	// The raw driver result code that triggered this exception.
	::streamfx::nvidia::cuda::result code()
	{
		return _code;
	}
};
// Dynamic loader for the NVIDIA CUDA Driver API.
// Each P_CUDA_DEFINE_FUNCTION line declares a typed function pointer that is
// resolved from the system CUDA driver library at runtime, avoiding any
// link-time dependency on the CUDA SDK. Obtain the shared instance via get().
class cuda {
std::shared_ptr<streamfx::util::library> _library; // Loaded CUDA driver library; keeps resolved symbols valid.
public:
~cuda();
cuda();
// Driver version as reported by cuDriverGetVersion.
int32_t version();
public:
// Initialization
P_CUDA_DEFINE_FUNCTION(cuInit, int32_t flags);
// Version Management
P_CUDA_DEFINE_FUNCTION(cuDriverGetVersion, int32_t* driverVersion);
// Device Management
P_CUDA_DEFINE_FUNCTION(cuDeviceGetName, char* name, int32_t length, device_t device);
P_CUDA_DEFINE_FUNCTION(cuDeviceGetLuid, luid_t* luid, uint32_t* device_node_mask, device_t device);
P_CUDA_DEFINE_FUNCTION(cuDeviceGetUuid, uuid_t* uuid, device_t device);
// - Not yet needed.
// Primary Context Management
P_CUDA_DEFINE_FUNCTION(cuDevicePrimaryCtxRelease, device_t device);
P_CUDA_DEFINE_FUNCTION(cuDevicePrimaryCtxRetain, context_t* ctx, device_t device);
P_CUDA_DEFINE_FUNCTION(cuDevicePrimaryCtxSetFlags, device_t device, context_flags flags);
// Context Management
P_CUDA_DEFINE_FUNCTION(cuCtxCreate, context_t* ctx, context_flags flags, device_t device);
P_CUDA_DEFINE_FUNCTION(cuCtxDestroy, context_t ctx);
P_CUDA_DEFINE_FUNCTION(cuCtxGetCurrent, context_t* ctx);
P_CUDA_DEFINE_FUNCTION(cuCtxGetStreamPriorityRange, int32_t* lowestPriority, int32_t* highestPriority);
P_CUDA_DEFINE_FUNCTION(cuCtxPopCurrent, context_t* ctx);
P_CUDA_DEFINE_FUNCTION(cuCtxPushCurrent, context_t ctx);
P_CUDA_DEFINE_FUNCTION(cuCtxSetCurrent, context_t ctx);
P_CUDA_DEFINE_FUNCTION(cuCtxSynchronize);
// Module Management
// - Not yet needed.
// Memory Management
P_CUDA_DEFINE_FUNCTION(cuArrayGetDescriptor, array_descriptor_v2_t* pArrayDescripter, array_t array);
P_CUDA_DEFINE_FUNCTION(cuMemAlloc, device_ptr_t* ptr, std::size_t bytes);
P_CUDA_DEFINE_FUNCTION(cuMemAllocPitch, device_ptr_t* ptr, std::size_t* pitch, std::size_t width_in_bytes, std::size_t height, uint32_t element_size_bytes);
P_CUDA_DEFINE_FUNCTION(cuMemFree, device_ptr_t ptr);
P_CUDA_DEFINE_FUNCTION(cuMemHostGetDevicePointer, device_ptr_t* devptr, void* ptr, uint32_t flags);
P_CUDA_DEFINE_FUNCTION(cuMemcpy, device_ptr_t dst, device_ptr_t src, std::size_t bytes);
P_CUDA_DEFINE_FUNCTION(cuMemcpy2D, const memcpy2d_v2_t* copy);
P_CUDA_DEFINE_FUNCTION(cuMemcpy2DAsync, const memcpy2d_v2_t* copy, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuMemcpyAtoA, array_t dst, std::size_t dstOffset, array_t src, std::size_t srcOffset, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyAtoD, device_ptr_t dst, array_t src, std::size_t srcOffset, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyAtoH, void* dst, array_t src, std::size_t srcOffset, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyAtoHAsync, void* dst, array_t src, std::size_t srcOffset, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyDtoA, array_t dst, std::size_t dstOffset, device_ptr_t src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyDtoD, device_ptr_t dst, array_t srcArray, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyDtoH, void* dst, array_t src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyDtoHAsync, void* dst, array_t src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyHtoA, array_t dst, std::size_t dstOffset, void* src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyHtoAAsync, array_t dst, std::size_t dstOffset, void* src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyHtoD, device_ptr_t dst, void* src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemcpyHtoDAsync, device_ptr_t dst, void* src, std::size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemsetD8, device_ptr_t dst, uint8_t d, size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemsetD8Async, device_ptr_t dst, uint8_t d, size_t byteCount, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuMemsetD16, device_ptr_t dst, uint16_t d, size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemsetD16Async, device_ptr_t dst, uint16_t d, size_t byteCount, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuMemsetD32, device_ptr_t dst, uint32_t d, size_t byteCount);
P_CUDA_DEFINE_FUNCTION(cuMemsetD32Async, device_ptr_t dst, uint32_t d, size_t byteCount, stream_t stream);
// Virtual Memory Management
// - Not yet needed.
// Stream Ordered Memory Allocator
// - Not yet needed.
// Unified Addressing
// - Not yet needed.
// Stream Management
P_CUDA_DEFINE_FUNCTION(cuStreamCreate, stream_t* stream, stream_flags flags);
P_CUDA_DEFINE_FUNCTION(cuStreamCreateWithPriority, stream_t* stream, stream_flags flags, int32_t priority);
P_CUDA_DEFINE_FUNCTION(cuStreamDestroy, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuStreamSynchronize, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuStreamGetPriority, stream_t stream, int32_t* priority);
// Event Management
// - Not yet needed.
// External Resource Interoperability (CUDA 11.1+)
// - Not yet needed.
// Stream Memory Operations
// - Not yet needed.
// Execution Control
// - Not yet needed.
// Graph Management
// - Not yet needed.
// Occupancy
// - Not yet needed.
// Texture Object Management
// - Not yet needed.
// Surface Object Management
// - Not yet needed.
// Peer Context Memory Access
// - Not yet needed.
// Graphics Interoperability
P_CUDA_DEFINE_FUNCTION(cuGraphicsMapResources, uint32_t count, graphics_resource_t* resources, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuGraphicsSubResourceGetMappedArray, array_t* array, graphics_resource_t resource, uint32_t index, uint32_t level);
P_CUDA_DEFINE_FUNCTION(cuGraphicsUnmapResources, uint32_t count, graphics_resource_t* resources, stream_t stream);
P_CUDA_DEFINE_FUNCTION(cuGraphicsUnregisterResource, graphics_resource_t resource);
// Driver Entry Point Access
// - Not yet needed.
// Profiler Control
// - Not yet needed.
// OpenGL Interoperability
// - Not yet needed.
// VDPAU Interoperability
// - Not yet needed.
// EGL Interoperability
// - Not yet needed.
#ifdef WIN32
// Direct3D9 Interoperability
// - Not yet needed.
// Direct3D10 Interoperability
P_CUDA_DEFINE_FUNCTION(cuD3D10GetDevice, device_t* device, IDXGIAdapter* adapter);
P_CUDA_DEFINE_FUNCTION(cuGraphicsD3D10RegisterResource, graphics_resource_t* resource, ID3D10Resource* d3dresource, uint32_t flags);
// Direct3D11 Interoperability
P_CUDA_DEFINE_FUNCTION(cuD3D11GetDevice, device_t* device, IDXGIAdapter* adapter);
P_CUDA_DEFINE_FUNCTION(cuGraphicsD3D11RegisterResource, graphics_resource_t* resource, ID3D11Resource* d3dresource, uint32_t flags);
#endif
public:
// Shared singleton accessor.
static std::shared_ptr<::streamfx::nvidia::cuda::cuda> get();
};
} // namespace streamfx::nvidia::cuda
P_ENABLE_BITMASK_OPERATORS(::streamfx::nvidia::cuda::context_flags)
P_ENABLE_BITMASK_OPERATORS(::streamfx::nvidia::cuda::stream_flags)

View File

@ -0,0 +1,40 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia/cv/nvidia-cv.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cv {
// Redundant self-referencing using-declarations (these names already live in
// this namespace); kept for source compatibility.
using ::streamfx::nvidia::cv::component_layout;
using ::streamfx::nvidia::cv::component_type;
using ::streamfx::nvidia::cv::memory_location;
using ::streamfx::nvidia::cv::pixel_format;
// RAII wrapper around an NvCVImage buffer (image_t), allocated through the
// NvCV runtime. Subclasses (e.g. texture) may back the image with other
// storage via the protected default constructor.
class image {
protected:
std::shared_ptr<::streamfx::nvidia::cv::cv> _cv; // Keeps the NvCV runtime loaded for the image's lifetime.
image_t _image; // Underlying NvCVImage descriptor.
uint32_t _alignment; // Alignment value used for (re)allocation.
public:
virtual ~image();
protected:
image(); // For subclasses that manage their own storage.
public:
// Allocate an image with the given geometry, format and memory location.
image(uint32_t width, uint32_t height, pixel_format pix_fmt, component_type cmp_type, component_layout cmp_layout, memory_location location, uint32_t alignment);
// Reallocate with new geometry/format parameters.
virtual void reallocate(uint32_t width, uint32_t height, pixel_format pix_fmt, component_type cmp_type, component_layout cmp_layout, memory_location location, uint32_t alignment);
// Resize, keeping the current format parameters.
virtual void resize(uint32_t width, uint32_t height);
// Raw descriptor for passing to NvCV/NvVFX entry points.
virtual ::streamfx::nvidia::cv::image_t* get_image();
};
} // namespace streamfx::nvidia::cv

View File

@ -0,0 +1,36 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2020-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "obs/gs/gs-texture.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
#include "warning-enable.hpp"
namespace streamfx::nvidia::cv {
// Redundant self-referencing using-declarations; kept for source
// compatibility.
using ::streamfx::nvidia::cv::component_layout;
using ::streamfx::nvidia::cv::component_type;
using ::streamfx::nvidia::cv::image;
using ::streamfx::nvidia::cv::memory_location;
using ::streamfx::nvidia::cv::pixel_format;
// NvCV image backed by an OBS graphics-subsystem texture.
// NOTE(review): the GPU interop mechanism is not visible in this header;
// see the alloc()/free() implementation.
class texture : public image {
std::shared_ptr<::streamfx::obs::gs::texture> _texture; // Backing OBS texture.
public:
~texture() override;
// Create a texture-backed image with the given size and OBS color format.
texture(uint32_t width, uint32_t height, gs_color_format pix_fmt);
// Recreate the backing storage at a new size.
void resize(uint32_t width, uint32_t height) override;
// Access the backing OBS texture (e.g. for rendering).
std::shared_ptr<::streamfx::obs::gs::texture> get_texture();
private:
void alloc(); // Create the backing resources.
void free(); // Release the backing resources.
};
} // namespace streamfx::nvidia::cv

View File

@ -0,0 +1,261 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// Copyright (C) 2022 lainon <GermanAizek@yandex.ru>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "util/util-bitmask.hpp"
#include "util/util-library.hpp"
#include "warning-disable.hpp"
#include <cinttypes>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#ifdef WIN32
#include <d3d11.h>
#include <dxgi.h>
#endif
#include "warning-enable.hpp"
// Declares an NvCV entry point returning ::streamfx::nvidia::cv::result as a
// typed function-pointer member, resolved from the NvCV library at runtime.
#define NVCVI_DEFINE_FUNCTION(name, ...) \
private: \
typedef ::streamfx::nvidia::cv::result(__cdecl* t##name)(__VA_ARGS__); \
\
public: \
t##name name = nullptr;
// Same as NVCVI_DEFINE_FUNCTION, but with an explicit return type, for entry
// points that do not return a result code.
#define NVCVI_DEFINE_FUNCTION_EX(ret, name, ...) \
private: \
typedef ret(__cdecl* t##name)(__VA_ARGS__); \
\
public: \
t##name name = nullptr;
namespace streamfx::nvidia::cv {
// Status codes returned by the NvCV / Video Effects runtime.
// Values mirror NvCV_Status: 0 on success, negative values on error.
enum class result {
SUCCESS = 0,
ERROR_GENERAL = -1,
ERROR_UNIMPLEMENTED = -2,
ERROR_MEMORY = -3,
ERROR_EFFECT = -4,
ERROR_SELECTOR = -5,
ERROR_BUFFER = -6,
ERROR_PARAMETER = -7,
ERROR_MISMATCH = -8,
ERROR_PIXELFORMAT = -9,
ERROR_MODEL = -10,
ERROR_LIBRARY = -11,
ERROR_INITIALIZATION = -12,
ERROR_FILE = -13,
ERROR_FEATURENOTFOUND = -14,
ERROR_MISSINGINPUT = -15,
ERROR_RESOLUTION = -16,
ERROR_UNSUPPORTEDGPU = -17,
ERROR_WRONGGPU = -18,
ERROR_UNSUPPORTEDDRIVER = -19,
ERROR_MODELDEPENDENCIES = -20,
ERROR_PARSE = -21,
ERROR_MODELSUBSTITUTION = -22,
ERROR_READ = -23,
ERROR_WRITE = -24,
ERROR_PARAMREADONLY = -25,
ERROR_TRT_ENQUEUE = -26,
ERROR_TRT_BINDINGS = -27,
ERROR_TRT_CONTEXT = -28,
ERROR_TRT_INFER = -29,
ERROR_TRT_ENGINE = -30,
ERROR_NPP = -31,
ERROR_CONFIG = -32,
// Error from Graphics API
ERROR_DIRECT3D = -99,
// Error from CUDA (values map to specific CUDA driver error codes)
ERROR_CUDA_BASE = -100,
ERROR_CUDA_VALUE = -101,
ERROR_CUDA_MEMORY = -102,
ERROR_CUDA_PITCH = -112,
ERROR_CUDA_INIT = -127,
ERROR_CUDA_LAUNCH = -819,
ERROR_CUDA_KERNEL = -309,
ERROR_CUDA_DRIVER = -135,
ERROR_CUDA_UNSUPPORTED = -901,
ERROR_CUDA_ILLEGAL_ADDRESS = -800,
ERROR_CUDA = -1099, // Generic/unspecified CUDA failure.
};
// Pixel/channel arrangement of an NvCV image (mirrors
// NvCVImage_PixelFormat).
enum class pixel_format {
UNKNOWN = 0,
Y = 1, // Luminance only.
A = 2, // Alpha only.
YA = 3, // Luminance + alpha.
RGB = 4,
BGR = 5,
RGBA = 6,
BGRA = 7,
ARGB = 8,
ABGR = 9,
YUV420 = 10, // Chroma subsampled 2x2.
YUV422 = 11, // Chroma subsampled horizontally.
YUV444 = 12, // No chroma subsampling.
};
// Per-component data type of an NvCV image (mirrors
// NvCVImage_ComponentType).
enum class component_type {
	UKNOWN = 0, // Historical misspelling; kept for source compatibility.
	UNKNOWN = UKNOWN, // Preferred, correctly spelled alias.
	UINT8  = 1,
	UINT16 = 2,
	SINT16 = 3,
	FP16   = 4,
	UINT32 = 5,
	SINT   = 6, // 32-bit signed integer; kept for source compatibility.
	SINT32 = SINT, // Alias consistent with UINT32/SINT16 naming.
	FP32   = 7,
	UINT64 = 8,
	SINT64 = 9,
	FP64   = 10,
};
// Ordering of components in memory (mirrors NvCVImage_ComponentLayout).
// The lower entries are the canonical layouts; the aliases below map common
// FourCC-style format names onto them.
enum class component_layout {
INTERLEAVED = 0, // All components of a pixel stored together.
PLANAR = 1, // Each component stored in its own plane.
UYVY = 2,
YUV = 3,
VYUY = 4,
YVU = 5,
YUYV = 6,
YCUV = 7, // Semi-planar: Y plane + interleaved chroma (UV).
YVYU = 8,
YCVU = 9, // Semi-planar: Y plane + interleaved chroma (VU).
CYUV = 10,
_RESERVED11 = 11,
CYVU = 12,
// FourCC-style aliases for the layouts above.
CHUNKY = INTERLEAVED,
I420 = YUV,
IYUV = YUV,
YV12 = YVU,
NV12 = YCUV,
NV21 = YCVU,
YUY2 = YUYV,
I444 = YUV,
YM24 = YUV,
YM42 = YVU,
NV24 = YCUV,
NV42 = YCVU,
};
// Color space, quantization range and chroma siting of YUV data.
// Values are bit flags and may be combined; bitmask operators are enabled
// via P_ENABLE_BITMASK_OPERATORS at the end of this header.
enum class color_information {
SPACE_BT_601 = 0x00,
SPACE_BT_709 = 0x01,
SPACE_BT_2020 = 0x02,
RANGE_PARTIAL = 0x00, // Limited/video range.
RANGE_FULL = 0x04,
CHROMA_LOCATION_COSITED = 0x00,
CHROMA_LOCATION_INTERSTITIAL = 0x08,
CHROMA_LOCATION_TOPLEFT = 0x10,
};
// Where an NvCV image's pixel buffer resides (mirrors NvCVImage_MemSpace).
enum class memory_location {
CPU = 0, // Pageable host memory.
GPU = 1, // Device memory.
CPU_PINNED = 2, // Page-locked host memory.
CUDA_ARRAY = 3, // CUDA array (texture-friendly layout).
};
// Mirror of the NvCVImage structure. Instances are passed directly to the
// NvCVImage_* entry points below, so member order, sizes and padding are
// ABI-sensitive — do not reorder or resize fields.
struct image_t {
uint32_t width; // Width in pixels.
uint32_t height; // Height in pixels.
int32_t pitch; // Row stride in bytes (signed).
pixel_format pxl_format; // Pixel/channel arrangement.
component_type comp_type; // Per-component data type.
uint8_t pixel_bytes; // Bytes per pixel.
uint8_t component_bytes; // Bytes per component.
uint8_t num_components; // Number of components per pixel.
unsigned char comp_layout; // component_layout, stored as a raw byte.
unsigned char mem_location; // memory_location, stored as a raw byte.
unsigned char color_info; // color_information flags, stored as a raw byte.
uint8_t reserved[2]; // Padding/reserved by NvCV.
void* pixels; // Pointer to the pixel buffer.
void* delete_pointer; // Context handed to delete_function on destruction.
void (*delete_function)(void* delete_pointer); // Custom deallocator, if any.
uint64_t buffer_bytes; // Total size of the allocated buffer in bytes.
};
// 2D point with an arbitrary scalar type; aggregate-initializable as {x, y}.
template<typename T>
struct point {
	T x; // Horizontal coordinate.
	T y; // Vertical coordinate.
};
// Axis-aligned rectangle: origin (x, y) plus extent (w, h);
// aggregate-initializable as {x, y, w, h}.
template<typename T>
struct rect {
	T x; // Origin, horizontal.
	T y; // Origin, vertical.
	T w; // Width.
	T h; // Height.
};
// Dynamic loader for the NVIDIA CV (NvCVImage) runtime.
// Each NVCVI_DEFINE_FUNCTION(_EX) line declares a typed function pointer
// resolved from the NvCV library at runtime. Obtain the shared instance via
// get().
class cv {
std::shared_ptr<::streamfx::util::library> _library; // Loaded NvCV library; keeps resolved symbols valid.
#ifdef WIN32
void* _extra; // NOTE(review): Windows-only auxiliary handle; purpose not visible in this header.
#endif
public:
~cv();
cv();
public:
// Image setup, allocation and destruction.
NVCVI_DEFINE_FUNCTION(NvCVImage_Init, image_t* image, uint32_t width, uint32_t height, uint32_t pitch, void* pixels, pixel_format format, component_type comp_type, component_layout comp_layout, memory_location mem_location);
NVCVI_DEFINE_FUNCTION(NvCVImage_InitView, image_t* sub_image, image_t* image, int32_t x, int32_t y, uint32_t width, uint32_t height);
NVCVI_DEFINE_FUNCTION(NvCVImage_Alloc, image_t* image, uint32_t width, uint32_t height, pixel_format format, component_type comp_type, uint32_t comp_layout, uint32_t mem_location, uint32_t alignment);
NVCVI_DEFINE_FUNCTION(NvCVImage_Realloc, image_t* image, uint32_t width, uint32_t height, pixel_format format, component_type comp_type, uint32_t comp_layout, uint32_t mem_location, uint32_t alignment);
NVCVI_DEFINE_FUNCTION_EX(void, NvCVImage_Dealloc, image_t* image);
NVCVI_DEFINE_FUNCTION(NvCVImage_Create, uint32_t width, uint32_t height, pixel_format format, component_type comp_type, component_layout comp_layout, memory_location mem_location, uint32_t alignment, image_t** image);
NVCVI_DEFINE_FUNCTION_EX(void, NvCVImage_Destroy, image_t* image);
NVCVI_DEFINE_FUNCTION_EX(void, NvCVImage_ComponentOffsets, pixel_format format, int32_t* red_offset, int32_t* green_offset, int32_t* blue_offset, int32_t* alpha_offset, int32_t* y_offset);
// Pixel transfer and conversion.
NVCVI_DEFINE_FUNCTION(NvCVImage_Transfer, const image_t* source, image_t* destination, float scale, ::streamfx::nvidia::cuda::stream_t stream, image_t* buffer);
NVCVI_DEFINE_FUNCTION(NvCVImage_TransferRect, const image_t* source, const rect<int32_t>* source_rect, image_t* destination, const point<int32_t>* destination_point, float scale, ::streamfx::nvidia::cuda::stream_t stream, image_t* buffer);
NVCVI_DEFINE_FUNCTION(NvCVImage_TransferFromYUV, const void* y, int32_t yPixBytes, int32_t yPitch, const void* u, const void* v, int32_t uvPixBytes, int32_t uvPitch, pixel_format yuvFormat, component_type yuvType, color_information yuvColorSpace, memory_location yuvMemSpace, image_t* destination, const rect<int32_t>* destination_area, float scale, ::streamfx::nvidia::cuda::stream_t stream, image_t* tmp);
NVCVI_DEFINE_FUNCTION(NvCVImage_TransferToYUV, const image_t* source, const rect<int32_t>* source_area, const void* y, int32_t yPixBytes, int32_t yPitch, const void* u, const void* v, int uvPixBytes, int32_t uvPitch, pixel_format yuvFormat, component_type yuvType, color_information yuvColorSpace, memory_location yuvMemSpace, float scale, ::streamfx::nvidia::cuda::stream_t stream, image_t* tmp);
// GPU resource mapping.
NVCVI_DEFINE_FUNCTION(NvCVImage_MapResource, image_t* image, ::streamfx::nvidia::cuda::stream_t stream);
NVCVI_DEFINE_FUNCTION(NvCVImage_UnmapResource, image_t* image, ::streamfx::nvidia::cuda::stream_t stream);
// Compositing and utility operations.
NVCVI_DEFINE_FUNCTION(NvCVImage_Composite, const image_t* foreground, const image_t* background, const image_t* matte, image_t* destination, ::streamfx::nvidia::cuda::stream_t stream);
NVCVI_DEFINE_FUNCTION(NvCVImage_CompositeRect, const image_t* foreground, const point<int32_t> foreground_origin, const image_t* background, const point<int32_t> background_origin, const image_t* matte, uint32_t mode, image_t* destination, const point<int32_t> destination_origin, ::streamfx::nvidia::cuda::stream_t stream);
NVCVI_DEFINE_FUNCTION(NvCVImage_CompositeOverConstant, const image_t* source, const image_t* matte, const uint8_t background_color[3], image_t* destination);
NVCVI_DEFINE_FUNCTION(NvCVImage_FlipY, const image_t* source, image_t* destination);
NVCVI_DEFINE_FUNCTION(NvCVImage_GetYUVPointers, image_t* image, uint8_t** y, uint8_t** u, uint8_t** v, int32_t* y_pixel_bytes, int32_t* c_pixel_bytes, int32_t* y_row_bytes, int32_t* c_row_bytes);
// Error reporting.
NVCVI_DEFINE_FUNCTION_EX(const char*, NvCV_GetErrorStringFromCode, result code);
#ifdef WIN32
// Direct3D 11 / DXGI interoperability.
NVCVI_DEFINE_FUNCTION(NvCVImage_InitFromD3D11Texture, image_t* image, struct ID3D11Texture2D* texture);
NVCVI_DEFINE_FUNCTION(NvCVImage_ToD3DFormat, pixel_format format, component_type comp_type, component_layout comp_layout, DXGI_FORMAT* dxgi_format);
NVCVI_DEFINE_FUNCTION(NvCVImage_FromD3DFormat, DXGI_FORMAT d3dFormat, pixel_format* format, component_type* comp_type, component_layout* comp_layout);
#ifdef __dxgicommon_h__
NVCVI_DEFINE_FUNCTION(NvCVImage_ToD3DColorSpace, color_information nvcvColorSpace, DXGI_COLOR_SPACE_TYPE* pD3dColorSpace);
NVCVI_DEFINE_FUNCTION(NvCVImage_FromD3DColorSpace, DXGI_COLOR_SPACE_TYPE d3dColorSpace, color_information* pNvcvColorSpace);
#endif
#endif
public:
// Shared singleton accessor.
static std::shared_ptr<::streamfx::nvidia::cv::cv> get();
};
// Exception carrying an NvCV result code alongside a contextual message.
class exception : public std::runtime_error {
	result _code;

	public:
	/** Construct with a null-terminated message and the failing result code. */
	exception(const char* what, result code) : std::runtime_error(what), _code(code) {}
	/** Construct from a string view.
	 * Copies through std::string, since a std::string_view's data() is not
	 * guaranteed to be null-terminated.
	 */
	exception(std::string_view what, result code) : std::runtime_error(std::string(what)), _code(code) {}
	~exception() override = default;

	/** NvCV result code that caused this exception.
	 * const-qualified so it is callable on `catch (const exception&)`.
	 */
	inline result code() const noexcept
	{
		return _code;
	}

	/** NVIDIA-provided description string for code(). */
	inline const char* description() const
	{
		return ::streamfx::nvidia::cv::cv::get()->NvCV_GetErrorStringFromCode(_code);
	}
};
} // namespace streamfx::nvidia::cv
P_ENABLE_BITMASK_OPERATORS(::streamfx::nvidia::cv::color_information);

View File

@ -0,0 +1,49 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-vfx-effect.hpp"
#include "nvidia-vfx.hpp"
#include "nvidia/cuda/nvidia-cuda-gs-texture.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "obs/gs/gs-texture.hpp"
namespace streamfx::nvidia::vfx {
// Wrapper around the NVIDIA Video Effects denoising effect.
class denoising : protected effect {
bool _dirty; // NOTE(review): presumably flags that buffers/parameters need a rebuild before processing; confirm in implementation.
std::shared_ptr<::streamfx::nvidia::cv::texture> _input; // Texture-backed input image.
std::shared_ptr<::streamfx::nvidia::cv::image> _convert_to_fp32; // Intermediate buffer (name suggests conversion to FP32).
std::shared_ptr<::streamfx::nvidia::cv::image> _source; // Effect source image.
std::shared_ptr<::streamfx::nvidia::cv::image> _destination; // Effect destination image.
std::shared_ptr<::streamfx::nvidia::cv::image> _convert_to_u8; // Intermediate buffer (name suggests conversion back to U8).
std::shared_ptr<::streamfx::nvidia::cv::texture> _output; // Texture-backed output image.
std::shared_ptr<::streamfx::nvidia::cv::image> _tmp; // Scratch buffer.
void* _states[1]; // Effect state handle array (single entry).
::streamfx::nvidia::cuda::device_ptr_t _state; // CUDA allocation backing the effect state.
uint32_t _state_size; // Size of the state allocation in bytes.
float _strength; // Cached strength parameter.
public:
~denoising();
denoising();
// Set/get the denoising strength.
void set_strength(float strength);
float strength();
// In/out: adjusts `size` to what the effect will actually use (see implementation).
void size(std::pair<uint32_t, uint32_t>& size);
// Run the effect on `in` and return the processed texture.
std::shared_ptr<::streamfx::obs::gs::texture> process(std::shared_ptr<::streamfx::obs::gs::texture> in);
private:
void resize(uint32_t width, uint32_t height); // Rebuild buffers for a new size.
void load(); // (Re-)load the effect with the current configuration.
};
} // namespace streamfx::nvidia::vfx

View File

@ -0,0 +1,190 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-vfx.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cuda/nvidia-cuda-stream.hpp"
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "nvidia/cv/nvidia-cv.hpp"
#include "nvidia/vfx/nvidia-vfx.hpp"
#include "warning-disable.hpp"
#include <memory>
#include <string>
#include <string_view>
#include "warning-enable.hpp"
namespace streamfx::nvidia::vfx {
using namespace ::streamfx::nvidia;
// Base wrapper around an NVIDIA Video Effects (NvVFX) effect handle.
// Owns the effect instance (_fx) together with the CUDA/CV/VFX runtimes it
// depends on, and exposes typed accessors over the NvVFX_Get*/NvVFX_Set*
// parameter API. All accessors return the NvCV result code of the call.
class effect {
protected:
std::shared_ptr<cuda::obs> _nvcuda; // CUDA runtime (OBS integration).
std::shared_ptr<cv::cv> _nvcvi; // NvCV (image) runtime.
std::shared_ptr<vfx> _nvvfx; // NvVFX runtime.
std::shared_ptr<void> _fx; // Opaque NvVFX effect handle.
std::u8string _model_path; // Model data path; NOTE(review): presumably handed to the effect on load — confirm in ctor.
public:
~effect();
// Create the effect identified by `name` via the NvVFX runtime.
effect(effect_t name);
// Raw NvVFX effect handle.
::streamfx::nvidia::vfx::handle_t get()
{
return _fx.get();
}
public /* Int32 */:
inline cv::result set_uint32(parameter_t param, uint32_t const value)
{
return _nvvfx->NvVFX_SetU32(_fx.get(), param, value);
}
inline cv::result get_uint32(parameter_t param, uint32_t& value)
{
return _nvvfx->NvVFX_GetU32(_fx.get(), param, &value);
}
inline cv::result set_int32(parameter_t param, int32_t const value)
{
return _nvvfx->NvVFX_SetS32(_fx.get(), param, value);
}
inline cv::result get_int32(parameter_t param, int32_t& value)
{
return _nvvfx->NvVFX_GetS32(_fx.get(), param, &value);
}
public /* Int64 */:
inline cv::result set_uint64(parameter_t param, uint64_t const value)
{
return _nvvfx->NvVFX_SetU64(_fx.get(), param, value);
}
inline cv::result get_uint64(parameter_t param, uint64_t& value)
{
return _nvvfx->NvVFX_GetU64(_fx.get(), param, &value);
}
public /* Float32 */:
inline cv::result set_float32(parameter_t param, float const value)
{
return _nvvfx->NvVFX_SetF32(_fx.get(), param, value);
}
inline cv::result get_float32(parameter_t param, float& value)
{
return _nvvfx->NvVFX_GetF32(_fx.get(), param, &value);
}
public /* Float64 */:
inline cv::result set_float64(parameter_t param, double const value)
{
return _nvvfx->NvVFX_SetF64(_fx.get(), param, value);
}
inline cv::result get_float64(parameter_t param, double& value)
{
return _nvvfx->NvVFX_GetF64(_fx.get(), param, &value);
}
public /* String */:
// char8_t/std::u8string overloads reinterpret to char; NvVFX takes plain
// C strings. The non-inline get_string overloads are defined out-of-line.
inline cv::result set_string(parameter_t param, const char* const value)
{
return _nvvfx->NvVFX_SetString(_fx.get(), param, value);
}
inline cv::result get_string(parameter_t param, const char*& value)
{
return _nvvfx->NvVFX_GetString(_fx.get(), param, &value);
}
inline cv::result set_string(parameter_t param, const char8_t* const value)
{
return _nvvfx->NvVFX_SetString(_fx.get(), param, reinterpret_cast<const char*>(value));
}
inline cv::result get_string(parameter_t param, const char8_t*& value)
{
return _nvvfx->NvVFX_GetString(_fx.get(), param, reinterpret_cast<const char**>(&value));
}
inline cv::result set_string(parameter_t param, std::string_view const& value)
{
// NOTE(review): NvVFX_SetString receives value.data(), which for a
// string_view is not guaranteed to be null-terminated — callers must
// pass views over null-terminated storage.
return _nvvfx->NvVFX_SetString(_fx.get(), param, value.data());
}
cv::result get_string(parameter_t param, std::string_view& value);
inline cv::result set_string(parameter_t param, std::string const& value)
{
return _nvvfx->NvVFX_SetString(_fx.get(), param, value.c_str());
}
cv::result get_string(parameter_t param, std::string& value);
inline cv::result set_string(parameter_t param, std::u8string const& value)
{
return _nvvfx->NvVFX_SetString(_fx.get(), param, reinterpret_cast<const char*>(value.c_str()));
}
cv::result get_string(parameter_t param, std::u8string& value);
public /* CUDA Stream */:
inline cv::result set_cuda_stream(parameter_t param, cuda::stream_t const& value)
{
return _nvvfx->NvVFX_SetCudaStream(_fx.get(), param, value);
}
inline cv::result get_cuda_stream(parameter_t param, cuda::stream_t& value)
{
return _nvvfx->NvVFX_GetCudaStream(_fx.get(), param, &value);
}
// Convenience overload unwrapping the stream wrapper object.
inline cv::result set_cuda_stream(parameter_t param, std::shared_ptr<cuda::stream> const& value)
{
return _nvvfx->NvVFX_SetCudaStream(_fx.get(), param, value->get());
}
//cv::result get_stream(parameter_t param, std::shared_ptr<cuda::stream>& value);
public /* CV Image */:
inline cv::result set_image(parameter_t param, cv::image_t* value)
{
return _nvvfx->NvVFX_SetImage(_fx.get(), param, value);
}
inline cv::result get_image(parameter_t param, cv::image_t* value)
{
return _nvvfx->NvVFX_GetImage(_fx.get(), param, value);
}
inline cv::result set_image(parameter_t param, std::shared_ptr<cv::image> const& value)
{
return _nvvfx->NvVFX_SetImage(_fx.get(), param, value->get_image());
}
// Fills the descriptor owned by `value`; `value` must be non-null.
inline cv::result get_image(parameter_t param, std::shared_ptr<cv::image>& value)
{
return _nvvfx->NvVFX_GetImage(_fx.get(), param, value->get_image());
}
public /* CV Texture */:
inline cv::result set_image(parameter_t param, std::shared_ptr<cv::texture> const& value)
{
return _nvvfx->NvVFX_SetImage(_fx.get(), param, value->get_image());
}
//cv::result get(parameter_t param, std::shared_ptr<cv::texture>& value);
public /* Objects */:
inline cv::result set_object(parameter_t param, void* const value)
{
return _nvvfx->NvVFX_SetObject(_fx.get(), param, value);
}
inline cv::result get_object(parameter_t param, void*& value)
{
return _nvvfx->NvVFX_GetObject(_fx.get(), param, &value);
}
public /* Control */:
// Load/initialize the effect with the currently set parameters.
inline cv::result load()
{
return _nvvfx->NvVFX_Load(_fx.get());
}
// Execute the effect; `async` maps to NvVFX_Run's second argument.
inline cv::result run(bool async = false)
{
return _nvvfx->NvVFX_Run(_fx.get(), async ? 1 : 0);
}
};
} // namespace streamfx::nvidia::vfx

View File

@ -0,0 +1,49 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-vfx-effect.hpp"
#include "nvidia-vfx.hpp"
#include "nvidia/cuda/nvidia-cuda-gs-texture.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "obs/gs/gs-texture.hpp"
namespace streamfx::nvidia::vfx {
// Quality/performance trade-off for the green-screen (background removal)
// effect.
enum class greenscreen_mode {
QUALITY = 0, // Favor output quality.
PERFORMANCE = 1, // Favor processing speed.
};
// Wrapper around the NVIDIA Video Effects green-screen (AI background
// removal) effect; produces a matte (mask) alongside the color output.
class greenscreen : protected effect {
bool _dirty; // NOTE(review): presumably flags that buffers need a rebuild before processing; confirm in implementation.
std::list<std::shared_ptr<::streamfx::obs::gs::texture>> _buffer; // Texture pool/history; see implementation for usage.
std::shared_ptr<::streamfx::nvidia::cv::texture> _input; // Texture-backed input image.
std::shared_ptr<::streamfx::nvidia::cv::image> _source; // Effect source image.
std::shared_ptr<::streamfx::nvidia::cv::image> _destination; // Effect destination image (matte).
std::shared_ptr<::streamfx::nvidia::cv::texture> _output; // Texture-backed output image.
std::shared_ptr<::streamfx::nvidia::cv::image> _tmp; // Scratch buffer.
public:
~greenscreen();
greenscreen();
// In/out: adjusts `size` to what the effect will actually use (see implementation).
void size(std::pair<uint32_t, uint32_t>& size);
// Select the quality/performance trade-off.
void set_mode(greenscreen_mode mode);
// Run the effect on `in`; afterwards get_color()/get_mask() expose results.
std::shared_ptr<::streamfx::obs::gs::texture> process(std::shared_ptr<::streamfx::obs::gs::texture> in);
// Color output of the last process() call.
std::shared_ptr<::streamfx::obs::gs::texture> get_color();
// Matte/mask output of the last process() call.
std::shared_ptr<::streamfx::obs::gs::texture> get_mask();
private:
void resize(uint32_t width, uint32_t height); // Rebuild buffers for a new size.
void load(); // (Re-)load the effect with the current configuration.
};
} // namespace streamfx::nvidia::vfx

View File

@ -0,0 +1,52 @@
// AUTOGENERATED COPYRIGHT HEADER START
// Copyright (C) 2021-2023 Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
// AUTOGENERATED COPYRIGHT HEADER END
#pragma once
#include "nvidia-vfx-effect.hpp"
#include "nvidia-vfx.hpp"
#include "nvidia/cuda/nvidia-cuda-gs-texture.hpp"
#include "nvidia/cuda/nvidia-cuda-obs.hpp"
#include "nvidia/cuda/nvidia-cuda.hpp"
#include "nvidia/cv/nvidia-cv-image.hpp"
#include "nvidia/cv/nvidia-cv-texture.hpp"
#include "obs/gs/gs-texture.hpp"
namespace streamfx::nvidia::vfx {
// Wrapper around the NVIDIA Video Effects super-resolution (AI upscaling)
// effect.
class superresolution : protected effect {
bool _dirty; // NOTE(review): presumably flags that buffers need a rebuild before processing; confirm in implementation.
std::shared_ptr<::streamfx::nvidia::cv::texture> _input; // Texture-backed input image.
std::shared_ptr<::streamfx::nvidia::cv::image> _convert_to_fp32; // Intermediate buffer (name suggests conversion to FP32).
std::shared_ptr<::streamfx::nvidia::cv::image> _source; // Effect source image.
std::shared_ptr<::streamfx::nvidia::cv::image> _destination; // Effect destination image.
std::shared_ptr<::streamfx::nvidia::cv::image> _convert_to_u8; // Intermediate buffer (name suggests conversion back to U8).
std::shared_ptr<::streamfx::nvidia::cv::texture> _output; // Texture-backed output image.
std::shared_ptr<::streamfx::nvidia::cv::image> _tmp; // Scratch buffer.
float _strength; // Cached strength parameter.
float _scale; // Cached scale factor.
std::pair<uint32_t, uint32_t> _cache_input_size; // Last computed input size.
std::pair<uint32_t, uint32_t> _cache_output_size; // Last computed output size.
float _cache_scale; // Scale the cached sizes were computed for.
public:
~superresolution();
superresolution();
// Set/get the effect strength.
void set_strength(float strength);
float strength();
// Set/get the upscaling factor.
void set_scale(float scale);
float scale();
// Compute the effective input/output sizes for a desired `size` at the
// current scale (see implementation for rounding/limits).
void size(std::pair<uint32_t, uint32_t> const& size, std::pair<uint32_t, uint32_t>& input_size, std::pair<uint32_t, uint32_t>& output_size);
// Run the effect on `in` and return the upscaled texture.
std::shared_ptr<::streamfx::obs::gs::texture> process(std::shared_ptr<::streamfx::obs::gs::texture> in);
private:
void resize(uint32_t width, uint32_t height); // Rebuild buffers for a new size.
void load(); // (Re-)load the effect with the current configuration.
};
} // namespace streamfx::nvidia::vfx

Some files were not shown because too many files have changed in this diff Show More