Merge branches 'workarounds' and 'video_provider_rework' into feature

This commit is contained in:
arch1t3cht 2023-07-16 17:57:09 +02:00
commit f9be4a854f
264 changed files with 222704 additions and 170565 deletions

View File

@ -2,9 +2,14 @@ name: Meson CI
on:
push:
branches: [ master ]
branches:
- master
- feature
- cibuilds
pull_request:
branches: [ master ]
branches:
- master
- feature
jobs:
build:
@ -33,6 +38,9 @@ jobs:
-Dfribidi:tests=false
-Dfribidi:docs=false
-Dlibass:fontconfig=disabled
-Davisynth=enabled
-Dbestsource=enabled
-Dvapoursynth=enabled
#- {
# name: Windows MinGW,
# os: windows-latest,
@ -50,25 +58,32 @@ jobs:
buildtype: release,
args: ''
}
- {
name: macOS Debug,
os: macos-latest,
buildtype: debugoptimized,
args: -Ddefault_library=static -Dbuild_osx_bundle=true -Dlocal_boost=true
}
- name: Ubuntu AppImage
os: ubuntu-22.04
buildtype: release
appimage: true
# distro ffms is currently broken
args: >-
--prefix=/usr
-Dbuild_appimage=true
-Ddefault_library=static
--force-fallback-for=ffms2
-Davisynth=enabled
-Dbestsource=enabled
-Dvapoursynth=enabled
- {
name: macOS Release,
os: macos-latest,
buildtype: release,
args: -Ddefault_library=static -Dbuild_osx_bundle=true -Dlocal_boost=true
args: -Ddefault_library=static -Dbuild_osx_bundle=true -Dlocal_boost=true -Dvapoursynth=enabled --force-fallback-for=ffms2
}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: '0'
- uses: actions/setup-python@v2
- uses: actions/setup-python@v4
with:
python-version: '3.x'
@ -103,13 +118,13 @@ jobs:
brew install pulseaudio # NO OpenAL in github CI
- name: Install dependencies (Linux)
if: matrix.config.os == 'ubuntu-latest'
if: startsWith(matrix.config.os, 'ubuntu-')
run: |
sudo apt-get update
sudo apt-get install ninja-build build-essential libx11-dev libwxgtk3.0-gtk3-dev libfreetype6-dev pkg-config libfontconfig1-dev libass-dev libasound2-dev libffms2-dev intltool libboost-all-dev
sudo apt-get install ninja-build build-essential libx11-dev libwxgtk3.0-gtk3-dev libfreetype6-dev pkg-config libfontconfig1-dev libass-dev libasound2-dev libffms2-dev intltool libboost-all-dev libhunspell-dev libuchardet-dev libpulse-dev libopenal-dev libjansson-dev
- name: Configure
run: meson build ${{ matrix.config.args }} -Dbuildtype=${{ matrix.config.buildtype }}
run: meson setup build ${{ matrix.config.args }} -Dbuildtype=${{ matrix.config.buildtype }}
- name: Build
run: meson compile -C build
@ -120,21 +135,25 @@ jobs:
# Windows artifacts
- name: Generate Windows installer
if: matrix.config.os == 'windows-latest'
run: meson compile win-installer -C build
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run:
meson compile win-installer -C build
- name: Generate Windows portable installer
if: matrix.config.os == 'windows-latest'
run: cd build && ninja win-portable
- name: Upload artifacts - win_installer
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: matrix.config.os == 'windows-latest'
with:
name: ${{ matrix.config.name }} - installer
path: build/Aegisub-*.exe
if-no-files-found: error
- name: Upload artifacts - portable.zip
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: matrix.config.os == 'windows-latest'
with:
name: ${{ matrix.config.name }} - portable
@ -148,8 +167,33 @@ jobs:
meson compile osx-build-dmg -C build
- name: Upload artifacts - macOS dmg
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: matrix.config.os == 'macos-latest'
with:
name: ${{ matrix.config.name }} - installer
path: build/Aegisub-*.dmg
if-no-files-found: error
# Linux artifacts (AppImage)
- name: Generate AppImage
if: matrix.config.appimage
run: |
mkdir -p appimage/appdir
meson install -C build --destdir=../appimage/appdir
cd appimage
sudo apt-get install libfuse2
curl -L "https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20220822-1/linuxdeploy-x86_64.AppImage" -o linuxdeploy
curl -L "https://github.com/AppImage/AppImageKit/releases/download/13/appimagetool-x86_64.AppImage" -o appimagetool
chmod +x linuxdeploy appimagetool
./linuxdeploy --appdir appdir --desktop-file=appdir/aegisub.desktop
./appimagetool appdir
- name: Upload artifacts - Linux AppImage
uses: actions/upload-artifact@v3
if: matrix.config.appimage
with:
name: ${{ matrix.config.name }}
path: appimage/*.AppImage
if-no-files-found: error

5
.gitignore vendored
View File

@ -1,5 +1,6 @@
/tests/data
automation/vapoursynth/__pycache__
packages/desktop/aegisub.desktop
packages/desktop/aegisub.desktop.template
src/libresrc/bitmap.cpp
@ -18,6 +19,8 @@ tools/repack-thes-dict.dSYM
# Meson
build*/
subprojects/avisynth
subprojects/bestsource/
subprojects/boost*/
subprojects/cairo*
subprojects/dav1d
@ -30,6 +33,7 @@ subprojects/glib*
subprojects/googletest-*
subprojects/harfbuzz
subprojects/icu
subprojects/jansson
subprojects/libass
subprojects/libffi*
subprojects/libpng-*
@ -42,3 +46,4 @@ subprojects/zlib-*
subprojects/dirent-*
subprojects/hunspell-*
subprojects/uchardet-*
subprojects/vapoursynth

105
README.md
View File

@ -1,3 +1,106 @@
## arch1t3cht's Aegisub "fork"
Download release builds [here](https://github.com/arch1t3cht/Aegisub/releases), or the latest CI builds [here](https://github.com/arch1t3cht/Aegisub/actions).
The release page also has a detailed list of all changes and new features. If you're interested in the technical details or want to compile it yourself, read on.
### Don't we have enough Aegisub forks already??
We absolutely do, and I'm aware that adding another one [doesn't sound like](https://xkcd.com/927/) a [good idea on paper](https://cdn.discordapp.com/attachments/425357202963038208/1007103606421459004/unknown.png). However,
- None of the existing forks are completely satisfying at the moment:
- [wangqr's fork](https://github.com/wangqr/Aegisub) is actively maintained, but focuses more on stability. It's missing most of the modern features.
- [AegisubDC](https://github.com/Ristellise/AegisubDC) has the most modern features (in particular video-panning), but is Windows-only and not actively maintained anymore.
- [The TypesettingTools fork](https://github.com/TypesettingTools/Aegisub) is the one that will one day become the upstream version and builds relatively effortlessly on all operating systems, but at the moment it's not moving much. It's the base for this fork, and I hope to one day merge most of these additions into it.
- Only PR'ing the changes in here to various forks would cause even more chaos
- ~~I try to convince myself that this isn't really a "fork" in the traditional sense - one which aims to provide extended support and stability fixes. It's a collection of new feature additions which I built myself, together with some of the most important new features floating around other forks.~~ At this point it's probably too late to still be saying this. Still, the general mission hasn't changed. This fork collects new features and critical bugfixes, but won't be putting extra time into maintenance aspects like cleanup and refactors. Partly, this is also because any big refactors would make it harder to pull these changes into upstream repositories or future forks.
While this is usually also the version of Aegisub I'm currently using, I make no promises on stability. **Don't** use this version if you're just looking for any version of Aegisub - this is mostly intended for typesetting and other advanced usage.
### Organization
Being a collection of different feature additions, this repository consists of a set of branches for different features, so that they can easily be merged into other repositories. The [`feature`](https://github.com/arch1t3cht/Aegisub/tree/feature) branch merges together all the features I deem as currently usable. Due to the structure of the repository, I will be force-pushing to this branch and some of the individual branches very frequently, so they're not ideal for basing further branches on.
The `cibuilds` branch makes some CI builds of snapshots of `feature` at relevant points in time.
### Branch/Feature list
This list is for navigating the repository. Go to the [release page](https://github.com/arch1t3cht/Aegisub/releases) for a more structured changelog.
- [`folding`](https://github.com/arch1t3cht/Aegisub/tree/folding): Add the ability to visually group and collapse lines in the subtitle grid
- [`lua_api`](https://github.com/arch1t3cht/Aegisub/tree/lua_api): Add new functions to the Lua automation API, like controlling the selection or cursor in the text edit box
- [`vector_clip_actions`](https://github.com/arch1t3cht/Aegisub/tree/vector_clip_actions): Make the different modes of the vector clip tool (lines, bezier curves, adding points, etc) bindable to hotkeys
- [`color_picker_fix2`](https://github.com/arch1t3cht/Aegisub/tree/color_picker_fix2): Add an option (under "Interface") to restrict the color picker to the window, which fixes the color picker on Linux in a lot of cases.
- [`avisynth`](https://github.com/arch1t3cht/Aegisub/tree/avisynth): Reenable Avisynth support on Windows and enable Avisynth on Linux
- [`bestsource`](https://github.com/arch1t3cht/Aegisub/tree/bestsource): Add BestSource audio and video source. This source is slower than others by multiple orders of magnitude, but in exchange it can guarantee exact seeking.
- [`vapoursynth`](https://github.com/arch1t3cht/Aegisub/tree/vapoursynth): Add Vapoursynth audio and video source
- [`bugfixes`](https://github.com/arch1t3cht/Aegisub/tree/bugfixes): Various fixes necessary for compilation. Most branches are based on this.
- [`workarounds`](https://github.com/arch1t3cht/Aegisub/tree/workarounds): Same as `bugfixes`, but these are hacky fixes that probably shouldn't be pulled without more work.
- [`fixes`](https://github.com/arch1t3cht/Aegisub/tree/fixes): Miscellaneous bugfixes
- [`misc`](https://github.com/arch1t3cht/Aegisub/tree/misc): Other miscellaneous additions
- [`wangqr_gui`](https://github.com/arch1t3cht/Aegisub/tree/wangqr_gui): Merge wangqr's changes regarding the GUI. In particular, add high-DPI compatibility.
- [`misc_dc`](https://github.com/arch1t3cht/Aegisub/tree/misc_dc): Miscellaneous changes taken from AegisubDC
- [`xa2-ds`](https://github.com/arch1t3cht/Aegisub/tree/xa2-ds): Add XAudio2 backend and allow stereo playback for some other backends, by wangqr and Shinon.
- [`stereo`](https://github.com/arch1t3cht/Aegisub/tree/stereo): Add multi-channel support for the other audio backends where possible.
- [`video_panning_option`](https://github.com/arch1t3cht/Aegisub/tree/video_panning_option): Merge [moex3's video zoom and panning](https://github.com/TypesettingTools/Aegisub/pull/150), with several bugfixes and more options to control zoom behavior
- [`spectrum-frequency-mapping`](https://github.com/arch1t3cht/Aegisub/tree/spectrum-frequency-mapping): Merge EleonoreMizo's [spectrum display improvements](https://github.com/TypesettingTools/Aegisub/pull/94), and also make Shift+Scroll vertically zoom the audio display
- [`wangqr_time_video`](https://github.com/arch1t3cht/Aegisub/tree/wangqr_time_video): Merge wangqr's feature adding a tool for timing subtitles to changes in the video
### Troubleshooting
I'll gladly take any bug reports, but if you encounter an issue, please check first if it occurs only on my fork, or also on [earlier TSTools builds](https://github.com/TypesettingTools/Aegisub/actions).
If it wasn't introduced by my fork, I can still take a look, but I can't promise anything.
You can find me for support on various servers, including the cave and the TSTools server linked below.
#### Aegisub on Linux doesn't recognize my GTK theme
This is probably because you're building with wxgtk2. Building with wxgtk3 fixes this, but causes some problems of its own (notably the broken color picker, occasional crashes when opening file dialogs from automation scripts, and general layout issues).
The exact way of switching depends on your Linux distribution, but essentially you need to ensure that `wx-config` or the next best variant of it points to wxgtk3. If it points to wxgtk2 by default and uninstalling wxgtk2 isn't an option, you can also temporarily move it out of the path or use a `native-file` in your meson project. Then, fully reconfigure meson using `meson configure --clearcache` and `meson setup --reconfigure`.
#### The video is desynced / Frames don't appear at the right time
This is probably due to the ffms2 seeking bug ([#394](https://github.com/FFMS/ffms2/issues/394)). On Windows, this specific regression shouldn't happen anymore. On Linux, you need to install the latest git version of ffms2 - for example the [`ffms2-git`](https://aur.archlinux.org/packages/ffms2-git) AUR package on Arch Linux, or just compile it yourself.
If it's not because of this particular bug, you can also try an alternative video source like LSMASHSource via Avisynth or Vapoursynth, or BestSource.
#### On Windows: Aegisub crashes whenever I open a video
If you're compiling yourself, try adding `--force-fallback-for=zlib` to the meson options.
### Compilation
If you're just looking to install Aegisub, you might want to check out the [releases page](https://github.com/arch1t3cht/Aegisub/releases) or the [CI builds](https://github.com/arch1t3cht/Aegisub/actions) first.
For compilation on Windows, see the TSTools documentation below. Also check the [GitHub workflow](https://github.com/arch1t3cht/Aegisub/blob/cibuilds/.github/workflows/ci.yml) for the project arguments.
On Arch Linux, there is an AUR package called [aegisub-arch1t3cht-git](https://aur.archlinux.org/packages/aegisub-arch1t3cht-git). It's not maintained by me but seems to work.
On other Linux distributions or for manual compilation you can use this package or the [TSTools PKGBUILD](https://aur.archlinux.org/packages/aegisub-ttools-meson-git) as a reference, in particular for installing the necessary dependencies if you don't want to compile them yourself.
If all dependencies are installed:
- Install Meson
- Clone the repository
- In the repository, run `meson setup build --buildtype=release` for the default configuration. See below for further options.
- `cd` to the `build` directory and run `ninja`
- You'll get an `aegisub` binary in the `build` folder. To install it to a system-wide location, run `ninja install`. To install to `/usr` instead of `/usr/local`, pass `--prefix=/usr` when configuring or reconfiguring meson.
- When recompiling after pulling new commits, skip the `meson setup` step and just run `ninja` from the build directory - even when the build configuration has changed.
#### Compilation flags
Some features are not enabled by default. To enable them, pass `-D<feature>=enabled` with the `meson setup` command:
- `-Davisynth=enabled`: Avisynth support
- `-Dbestsource=enabled`: BestSource
- `-Dvapoursynth=enabled`: Vapoursynth support
You can also disable options that are active by default in the same way. Check the file `meson_options.txt` for all options.
To change the options of an existing build directory, run `meson setup --reconfigure <new arguments>` from inside the `build` directory.
### Dependencies
Apart from the dependencies for the TSTools version, there are some additional dependencies. These are cloned and compiled from scratch if not found, but you might want to install binaries instead:
- `jansson`: For BestSource
- `ffmpeg`: Becomes a direct dependency when compiling with BestSource
- `avisynth` (or `avisynthplus`): Optional run-time dependency for the Avisynth source
- `vapoursynth`: Optional run-time dependency for the VapourSynth source
The following VapourSynth plugins are used by the default scripts set in the default configuration:
- [`lsmas`](https://github.com/AkarinVS/L-SMASH-Works): For LWLibavSource
- [`bas`](https://github.com/vapoursynth/bestaudiosource): For BestAudioSource
- [`wwxd`](https://github.com/dubhater/vapoursynth-wwxd) and [`scxvid`](https://github.com/dubhater/vapoursynth-scxvid) (depending on settings): For keyframe generation
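To illustrate how these pieces fit together, here is a minimal sketch of a video script built on the `aegisub_vs` module added in this commit (see `automation/vapoursynth/aegisub_vs.py` below). The `filename` variable and the `__aegi_*` outputs come from the script protocol described in that module's docstring; the default script actually shipped in Aegisub's configuration may differ in detail.

```python
# Minimal sketch of a default video script using aegisub_vs (illustrative only).
import aegisub_vs as a

a.set_paths(globals())  # pick up __aegi_vscache / __aegi_vsplugins from Aegisub

# Open the video with LWLibavSource and reuse its .lwi index for timing data.
clip, videoinfo = a.wrap_lwlibavsource(filename)  # `filename` is set by Aegisub
clip.set_output()  # Aegisub reads the video from the 0-th output node

__aegi_timecodes = videoinfo["timecodes"]
__aegi_keyframes = a.get_keyframes(filename, clip, videoinfo["keyframes"],
                                   generate=a.GenKeyframesMode.ASK)
__aegi_hasaudio = 1 if a.check_audio(filename) else 0
```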
# Aegisub
For binaries and general information [see the homepage](http://www.aegisub.org).
@ -30,7 +133,7 @@ All other dependencies are either stored in the repository or are included as su
Building:
1. Clone Aegisub's repository: `git clone https://github.com/TypesettingTools/Aegisub.git`
1. Clone Aegisub's repository: `git clone https://github.com/arch1t3cht/Aegisub.git`
2. From the Visual Studio "x64 Native Tools Command Prompt", generate the build directory: `meson build -Ddefault_library=static` (if building for release, add `--buildtype=release`)
3. Build with `cd build` and `ninja`

View File

@ -41,3 +41,7 @@ install_data(
'include/aegisub/unicode.moon',
'include/aegisub/util.moon',
install_dir: automation_dir / 'include' / 'aegisub')
install_data(
'vapoursynth/aegisub_vs.py',
install_dir: automation_dir / 'vapoursynth')

View File

@ -0,0 +1,69 @@
Video Frame functions in Automation 4
This file describes the interface used for reading frames from loaded videos.
---
Get a specific frame from the currently loaded video. Multiple other
functions are defined on the returned frame object.
function aegisub.get_frame(frame_number, withSubtitles)
@frame_number (number)
Number of frame to retrieve.
@withSubtitles (boolean)
Optional. Whether to load the frame with subtitles drawn onto it.
Returns: frame (userdata)
The frame object defines multiple other functions. See below.
---
Get width of frame object.
function frame:width()
Returns: number
Width in pixels.
---
Get height of frame object.
function frame:height()
Returns: number
Height in pixels.
---
Get RGB pixel value at a certain position of frame object.
function frame:getPixel(x, y)
@x (number)
Pixel to retrieve on the x-axis
@y (number)
Pixel to retrieve on the y-axis
Returns: number
Integer value representing the RGB pixel value.
---
Get ASS formatted pixel value at a certain position of frame object.
function frame:getPixelFormatted(x, y)
@x (number)
Pixel to retrieve on the x-axis
@y (number)
Pixel to retrieve on the y-axis
Returns: string
String in ASS format representing the pixel value. e.g. "&H0073FF&"
---

View File

@ -0,0 +1,66 @@
Automation 4 Gui Functions
This document describes the available Automation 4 functions for
controlling the editor's graphical interface. These all reside in the
table aegisub.gui.
---
Getting and setting the selection and cursor in the text edit box
This set of functions controls the selection in the text edit box.
All indices are counted starting from 1, following Lua conventions.
The setter functions are applied after all subtitle changes have been
applied. Only the latest update is applied.
The getter functions return the state after the latest update by
the setter functions, or the original state if there were none.
function aegisub.gui.get_cursor()
Returns: 1 number
1. The position of the cursor in the text edit field.
---
function aegisub.gui.get_selection()
Returns: 2 values, both numbers.
1. Starting position of the selection.
2. Ending position of the selection, always greater than or equal
to the starting position.
---
function aegisub.gui.set_cursor(position)
@position (number)
The new position of the cursor.
Returns: 0 values
---
function aegisub.gui.set_selection(start, end)
@start (number)
The new start of the selection.
@end (number)
The new end of the selection, i.e. where the cursor will be.
Can be smaller than the start, in which case the cursor will
be on the left side of the selection.
Returns: 0 values
---
Determining whether there are unsaved changes
function aegisub.gui.is_modified()
Returns: 1 boolean
1. Whether the current file has unsaved changes.
---

View File

@ -0,0 +1,273 @@
"""
Utility functions for loading video files into Aegisub using the VapourSynth
video provider.
When encountering a file whose file extension is not .py or .vpy, the
VapourSynth audio and video providers will execute the respective default
script set in Aegisub's configuration, with the following string variables set:
- filename: The path to the file that's being opened.
- __aegi_data, __aegi_dictionary, __aegi_local, __aegi_script, __aegi_temp, __aegi_user:
The values of ?data, ?dictionary, etc. respectively.
- __aegi_vscache: The path to a directory where the VapourSynth script can
store cache files. This directory is cleaned by Aegisub when it gets too
large (as defined by Aegisub's configuration).
The provider reads the video from the script's 0-th output node. By default,
the video is assumed to be CFR. The script can pass further information to
Aegisub using the following variables:
- __aegi_timecodes: List[int] | str: The timecodes for the video, or the
path to a timecodes file.
- __aegi_keyframes: List[int] | str: List of frame numbers to load as
keyframes, or the path to a keyframes file.
- __aegi_hasaudio: int: If nonzero, Aegisub will try to load an audio track
from the same file.
This module provides some utility functions to obtain timecodes, keyframes, and
other data.
"""
import os
import os.path
import re
from enum import Enum
from tkinter.messagebox import askyesno
from collections import deque
from typing import Any, Dict, List, Tuple
import vapoursynth as vs
core = vs.core
aegi_vscache: str = ""
aegi_vsplugins: str = ""
plugin_extension = ".dll" if os.name == "nt" else ".so"
def set_paths(vars: dict):
"""
Initialize the wrapper library with the given configuration directories.
Should usually be called at the start of the default script as
set_paths(globals())
"""
global aegi_vscache
global aegi_vsplugins
aegi_vscache = vars["__aegi_vscache"]
aegi_vsplugins = vars["__aegi_vsplugins"]
def ensure_plugin(name: str, loadname: str, errormsg: str):
"""
Ensures that the VapourSynth plugin with the given name exists.
If it doesn't, it tries to load it from `loadname`.
If that fails, it raises an error with the given error message.
"""
if hasattr(core, name):
return
if aegi_vsplugins and loadname:
try:
core.std.LoadPlugin(os.path.join(aegi_vsplugins, loadname + plugin_extension))
if hasattr(core, name):
return
except vs.Error:
pass
raise vs.Error(errormsg)
def make_lwi_cache_filename(filename: str) -> str:
"""
Given a path to a video, will return a file name like the one LWLibavSource
would use for a .lwi file.
"""
max_len = 254
extension = ".lwi"
if len(filename) + len(extension) > max_len:
filename = filename[-(max_len + len(extension)):]
return "".join(("_" if c in "/\\:" else c) for c in filename) + extension
def make_keyframes_filename(filename: str) -> str:
"""
Given a path `path/to/file.mkv`, will return the path
`path/to/file_keyframes.txt`.
"""
extlen = filename[::-1].find(".") + 1
return filename[:len(filename) - extlen] + "_keyframes.txt"
lwindex_re1 = re.compile(r"Index=(?P<Index>-?[0-9]+),POS=(?P<POS>-?[0-9]+),PTS=(?P<PTS>-?[0-9]+),DTS=(?P<DTS>-?[0-9]+),EDI=(?P<EDI>-?[0-9]+)")
lwindex_re2 = re.compile(r"Key=(?P<Key>-?[0-9]+),Pic=(?P<Pic>-?[0-9]+),POC=(?P<POC>-?[0-9]+),Repeat=(?P<Repeat>-?[0-9]+),Field=(?P<Field>-?[0-9]+)")
streaminfo_re = re.compile(r"Codec=(?P<Codec>[0-9]+),TimeBase=(?P<TimeBase>[0-9\/]+),Width=(?P<Width>[0-9]+),Height=(?P<Height>[0-9]+),Format=(?P<Format>[0-9a-zA-Z]+),ColorSpace=(?P<ColorSpace>[0-9]+)")
class LWIndexFrame:
pts: int
key: int
def __init__(self, raw: list[str]):
match1 = lwindex_re1.match(raw[0])
match2 = lwindex_re2.match(raw[1])
if not match1 or not match2:
raise ValueError("Invalid lwindex format")
self.pts = int(match1.group("PTS"))
self.key = int(match2.group("Key"))
def __int__(self) -> int:
return self.pts
def info_from_lwindex(indexfile: str) -> Dict[str, List[int]]:
"""
Given a path to an .lwi file, will return a dictionary containing
information about the video, with the keys
- timecodes: The timecodes.
- keyframes: Array of frame numbers of keyframes.
"""
with open(indexfile, encoding="latin1") as f:
index = f.read().splitlines()
indexstart, indexend = index.index("</StreamInfo>") + 1, index.index("</LibavReaderIndex>")
frames = [LWIndexFrame(index[i:i+2]) for i in range(indexstart, indexend, 2)]
frames.sort(key=int)
streaminfo = streaminfo_re.match(index[indexstart - 2])
if not streaminfo:
raise ValueError("Invalid lwindex format")
timebase_num, timebase_den = [int(i) for i in streaminfo.group("TimeBase").split("/")]
return {
"timecodes": [(f.pts * 1000 * timebase_num) // timebase_den for f in frames],
"keyframes": [i for i, f in enumerate(frames) if f.key],
}
def wrap_lwlibavsource(filename: str, cachedir: str | None = None, **kwargs: Any) -> Tuple[vs.VideoNode, Dict[str, List[int]]]:
"""
Given a path to a video file and a directory to store index files in
(usually __aegi_vscache), will open the video with LWLibavSource and read
the generated .lwi file to obtain the timecodes and keyframes.
Additional keyword arguments are passed on to LWLibavSource.
"""
if cachedir is None:
cachedir = aegi_vscache
try:
os.mkdir(cachedir)
except FileExistsError:
pass
cachefile = os.path.join(cachedir, make_lwi_cache_filename(filename))
ensure_plugin("lsmas", "libvslsmashsource", "To use Aegisub's LWLibavSource wrapper, the `lsmas` plugin for VapourSynth must be installed")
if b"-Dcachedir" not in core.lsmas.Version()["config"]: # type: ignore
raise vs.Error("To use Aegisub's LWLibavSource wrapper, the `lsmas` plugin must support the `cachedir` option for LWLibavSource.")
clip = core.lsmas.LWLibavSource(source=filename, cachefile=cachefile, **kwargs)
return clip, info_from_lwindex(cachefile)
def make_keyframes(clip: vs.VideoNode, use_scxvid: bool = False,
resize_h: int = 360, resize_format: int = vs.GRAY8,
**kwargs: Any) -> List[int]:
"""
Generates a list of keyframes from a clip, using either WWXD or Scxvid.
Will be slightly more efficient with the `akarin` plugin installed.
:param clip: Clip to process.
:param use_scxvid: Whether to use Scxvid. If False, the function uses WWXD.
:param resize_h: Height to resize the clip to before processing.
:param resize_format: Format to convert the clip to before processing.
The remaining keyword arguments are passed on to the respective filter.
"""
clip = core.resize.Bilinear(clip, width=resize_h * clip.width // clip.height, height=resize_h, format=resize_format)
if use_scxvid:
ensure_plugin("scxvid", "libscxvid", "To use the keyframe generation, the scxvid plugin for VapourSynth must be installed")
clip = core.scxvid.Scxvid(clip, **kwargs)
else:
ensure_plugin("wwxd", "libwwxd64", "To use the keyframe generation, the wwxdplugin for VapourSynth must be installed")
clip = core.wwxd.WWXD(clip, **kwargs)
keyframes = {}
done = 0
def _cb(n: int, f: vs.VideoFrame) -> vs.VideoFrame:
nonlocal done
keyframes[n] = f.props._SceneChangePrev if use_scxvid else f.props.Scenechange # type: ignore
done += 1
if done % (clip.num_frames // 25) == 0:
vs.core.log_message(vs.MESSAGE_TYPE_INFORMATION, "Detecting keyframes... {}% done.\n".format(100 * done // clip.num_frames))
return f
deque(clip.std.ModifyFrame(clip, _cb).frames(close=True), 0)
vs.core.log_message(vs.MESSAGE_TYPE_INFORMATION, "Done detecting keyframes.\n")
return [n for n in range(clip.num_frames) if keyframes[n]]
def save_keyframes(filename: str, keyframes: List[int]):
"""
Saves a list of keyframes in Aegisub's keyframe format v1 to a file with
the given filename.
"""
with open(filename, "w") as f:
f.write("# keyframe format v1\n")
f.write("fps 0\n")
f.write("".join(f"{n}\n" for n in keyframes))
class GenKeyframesMode(Enum):
NEVER = 0
ALWAYS = 1
ASK = 2
def get_keyframes(filename: str, clip: vs.VideoNode, fallback: str | List[int],
generate: GenKeyframesMode = GenKeyframesMode.ASK, **kwargs: Any) -> str | List[int]:
"""
Looks for a keyframes file for the given filename.
If no file was found, this function can generate a keyframe file for the given clip next
to the given filename using WWXD or Scxvid (see the make_keyframes docstring).
Whether or not keyframes are generated depends on the `generate` argument.
Depending on the `generate` argument, the function will
- always generate keyframes when no file was found
- never generate keyframes when no file was found
(and return the fallback keyframes instead)
- show a dialog to ask the user whether keyframes should be
generated or not
Additional keyword arguments are passed on to make_keyframes.
"""
kffilename = make_keyframes_filename(filename)
if not os.path.exists(kffilename):
if generate == GenKeyframesMode.NEVER:
return fallback
if generate == GenKeyframesMode.ASK and not askyesno("Generate Keyframes", \
"No keyframes file was found for this video file.\nShould Aegisub detect keyframes from the video?\nThis will take a while.", default="no"):
return fallback
vs.core.log_message(vs.MESSAGE_TYPE_INFORMATION, "No keyframes file found, detecting keyframes...\n")
keyframes = make_keyframes(clip, **kwargs)
save_keyframes(kffilename, keyframes)
return kffilename
def check_audio(filename: str, **kwargs: Any) -> bool:
"""
Checks whether the given file has an audio track by trying to open it with
BestAudioSource. Requires the `bas` plugin to return correct results, but
won't crash if it's not installed.
Additional keyword arguments are passed on to BestAudioSource.
"""
try:
ensure_plugin("bas", "BestAudioSource", "")
vs.core.bas.Source(source=filename, **kwargs)
return True
except AttributeError:
pass
except vs.Error:
pass
return False
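Outside of Aegisub, the same helpers can be used to pre-generate the `<video>_keyframes.txt` file that `get_keyframes` looks for, so the editor never has to prompt. A small sketch, where the paths are placeholders and which assumes the `lsmas` (built with `cachedir` support) and `wwxd` plugins are available through VapourSynth's normal plugin autoloading rather than via `__aegi_vsplugins`:

```python
# Hypothetical standalone usage; paths are placeholders.
import aegisub_vs as a

video = "/path/to/episode.mkv"
clip, _ = a.wrap_lwlibavsource(video, cachedir="/tmp/aegi_vscache")
keyframes = a.make_keyframes(clip, use_scxvid=False)  # WWXD-based detection
a.save_keyframes(a.make_keyframes_filename(video), keyframes)
```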

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 21.534604,6.1472094 51.814439,14.337199 58.897111,53.851308 5.1215609,57.175696 Z"
id="rect2387"
style="display:inline;fill:none;stroke:#80b3ff;stroke-width:5;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1" />
<path
d="m 26.619476,19.563308 17.90644,3.127764 1.752386,21.835103 -25.106058,0.109689 z"
id="rect2387-8"
style="display:inline;fill:#ff0000;stroke:#803300;stroke-width:2.32035;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none" />
</svg>


View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 32,4 V 60 M 48,4 V 60 M 16,4 v 56"
id="rect2387"
style="display:inline;fill:none;stroke:#803300;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
<path
d="M 60,32 H 4 M 60,48 H 4 M 60,16 H 4"
id="rect2387-5"
style="display:inline;fill:none;stroke:#803300;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
</svg>


View File

@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 21.534604,6.1472094 51.814439,14.337199 58.897111,53.851308 5.1215609,57.175696 Z"
id="rect2387"
style="display:inline;fill:none;stroke:#ff0000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:6, 9;stroke-dashoffset:0;stroke-opacity:1" />
<g
id="g4605"
transform="matrix(1.2858376,0,0,1.2858376,-8.5433316,-7.7280751)">
<path
d="M 41.542973,29.070062 V 43.827625 L 25.457027,43.82747 V 29.070062 Z"
id="rect2387-8"
style="display:inline;fill:#80b3ff;fill-opacity:1;stroke:#002060;stroke-width:2.2344;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1" />
<path
d="m 28.77085,28.13212 c 0,-5.027712 1.267775,-8.633444 5.083463,-8.633444 3.815688,0 4.57289,3.490135 4.57289,8.763711"
style="display:inline;fill:none;fill-opacity:1;stroke:#002060;stroke-width:3.11081;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
id="path2399" />
</g>
</svg>


View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 21.534604,6.1472094 51.814439,14.337199 58.897111,53.851308 5.1215609,57.175696 Z"
id="rect2387"
style="display:inline;fill:none;stroke:#ff0000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:6, 9;stroke-dashoffset:0;stroke-opacity:1" />
<path
d="M 30.272664,27.88368 H 42.773336 L 36.523,40.384351 v 0 z"
id="rect3294"
style="fill:#ff0000;stroke:#803300;stroke-width:2.50013;stroke-linecap:round;stroke-linejoin:round" />
</svg>


View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 21.534604,6.1472094 51.814439,14.337199 58.897111,53.851308 5.1215609,57.175696 Z"
id="rect2387"
style="display:inline;fill:none;stroke:#ff0000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:6, 9;stroke-dashoffset:0;stroke-opacity:1" />
<path
d="m 37.858975,14.096991 h 12.50005 L 44.109,26.59704 v 0 z"
id="rect3294"
style="fill:#ff0000;stroke:#803300;stroke-width:2.50001;stroke-linecap:round;stroke-linejoin:round" />
</svg>


View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 8,8 H 56 V 56 H 8 Z"
id="rect2391"
style="fill:none;stroke:#ff0000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:6, 9;stroke-dashoffset:0;stroke-opacity:1" />
<path
d="m 37.858519,14.096401 h 12.500872 l -6.250436,12.500871 v 0 z"
id="rect3294"
style="fill:#ff0000;stroke:#803300;stroke-width:2.50017;stroke-linecap:round;stroke-linejoin:round" />
</svg>


View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
version="1.0"
width="64"
height="64"
id="svg2385"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2387" />
<path
d="M 21.534604,6.1472094 51.814439,14.337199 58.897111,53.851308 5.1215609,57.175696 Z"
id="rect2387"
style="display:inline;fill:none;stroke:#ff0000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:6, 9;stroke-dashoffset:0;stroke-opacity:1" />
<path
d="m 26.619476,19.563308 17.90644,3.127764 1.752386,21.835103 -25.106058,0.109689 z"
id="rect2387-8"
style="display:inline;fill:#80b3ff;fill-opacity:1;stroke:#002060;stroke-width:2.32035;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1" />
</svg>


View File

@ -60,7 +60,11 @@ public:
case dt::ERROR: SetStyling(tok.length, ss::ERROR); break;
case dt::ARG: SetStyling(tok.length, ss::PARAMETER); break;
case dt::COMMENT: SetStyling(tok.length, ss::COMMENT); break;
case dt::DRAWING: SetStyling(tok.length, ss::DRAWING); break;
case dt::DRAWING_CMD:SetStyling(tok.length, ss::DRAWING_CMD);break;
case dt::DRAWING_X: SetStyling(tok.length, ss::DRAWING_X); break;
case dt::DRAWING_Y: SetStyling(tok.length, ss::DRAWING_Y); break;
case dt::DRAWING_ENDPOINT_X: SetStyling(tok.length, ss::DRAWING_ENDPOINT_X); break;
case dt::DRAWING_ENDPOINT_Y: SetStyling(tok.length, ss::DRAWING_ENDPOINT_Y); break;
case dt::TEXT: SetStyling(tok.length, ss::NORMAL); break;
case dt::TAG_NAME: SetStyling(tok.length, ss::TAG); break;
case dt::OPEN_PAREN: case dt::CLOSE_PAREN: case dt::ARG_SEP: case dt::TAG_START:
@ -72,6 +76,8 @@ public:
case dt::WHITESPACE:
if (ranges.size() && ranges.back().type == ss::PARAMETER)
SetStyling(tok.length, ss::PARAMETER);
else if (ranges.size() && ranges.back().type == ss::DRAWING_ENDPOINT_X)
SetStyling(tok.length, ss::DRAWING_ENDPOINT_X); // connect the underline between x and y of endpoints
else
SetStyling(tok.length, ss::NORMAL);
break;
@ -118,6 +124,64 @@ class WordSplitter {
}
}
void SplitDrawing(size_t &i) {
size_t starti = i;
// First, split into words
size_t dpos = pos;
size_t tlen = 0;
bool tokentype = text[pos] == ' ' || text[pos] == '\t';
while (tlen < tokens[i].length) {
bool newtype = text[dpos] == ' ' || text[dpos] == '\t';
if (newtype != tokentype) {
tokentype = newtype;
SwitchTo(i, tokentype ? dt::DRAWING_FULL : dt::WHITESPACE, tlen);
tokens[i].type = tokentype ? dt::WHITESPACE : dt::DRAWING_FULL;
tlen = 0;
}
++tlen;
++dpos;
}
// Then, label all the tokens
dpos = pos;
int num_coord = 0;
char lastcmd = ' ';
for (size_t j = starti; j <= i; j++) {
char c = text[dpos];
if (tokens[j].type == dt::WHITESPACE) {
} else if (c == 'm' || c == 'n' || c == 'l' || c == 's' || c == 'b' || c == 'p' || c == 'c') {
tokens[j].type = dt::DRAWING_CMD;
if (tokens[j].length != 1)
tokens[j].type = dt::ERROR;
if (num_coord % 2 != 0)
tokens[j].type = dt::ERROR;
lastcmd = c;
num_coord = 0;
} else {
bool valid = true;
for (size_t k = 0; k < tokens[j].length; k++) {
char c = text[dpos + k];
if (!((c >= '0' && c <= '9') || c == '.' || c == '+' || c == '-' || c == 'e' || c == 'E')) {
valid = false;
}
}
if (!valid)
tokens[j].type = dt::ERROR;
else if (lastcmd == 'b' && num_coord % 6 >= 4)
tokens[j].type = num_coord % 2 == 0 ? dt::DRAWING_ENDPOINT_X : dt::DRAWING_ENDPOINT_Y;
else
tokens[j].type = num_coord % 2 == 0 ? dt::DRAWING_X : dt::DRAWING_Y;
++num_coord;
}
dpos += tokens[j].length;
}
}
public:
WordSplitter(std::string const& text, std::vector<DialogueToken> &tokens)
: text(text)
@ -131,6 +195,9 @@ public:
size_t len = tokens[i].length;
if (tokens[i].type == dt::TEXT)
SplitText(i);
else if (tokens[i].type == dt::DRAWING_FULL) {
SplitDrawing(i);
}
pos += len;
}
}
@ -163,9 +230,51 @@ void MarkDrawings(std::string const& str, std::vector<DialogueToken> &tokens) {
switch (tokens[i].type) {
case dt::TEXT:
if (in_drawing)
tokens[i].type = dt::DRAWING;
tokens[i].type = dt::DRAWING_FULL;
break;
case dt::TAG_NAME:
if (i + 3 < tokens.size() && (len == 4 || len == 5) && !strncmp(str.c_str() + pos + len - 4, "clip", 4)) {
if (tokens[i + 1].type != dt::OPEN_PAREN)
goto tag_p;
size_t drawing_start = 0;
size_t drawing_end = 0;
// Try to find a vector clip
for (size_t j = i + 2; j < tokens.size(); j++) {
if (tokens[j].type == dt::ARG_SEP) {
if (drawing_start) {
break; // More than two arguments - this is a rectangular clip
}
drawing_start = j + 1;
} else if (tokens[j].type == dt::CLOSE_PAREN) {
drawing_end = j;
break;
} else if (tokens[j].type != dt::WHITESPACE && tokens[j].type != dt::ARG) {
break;
}
}
if (!drawing_end)
goto tag_p;
if (!drawing_start)
drawing_start = i + 2;
if (drawing_end == drawing_start)
goto tag_p;
// We found a clip between drawing_start and drawing_end. Now, join
// all the tokens into one and label it as a drawing.
size_t tokenlen = 0;
for (size_t j = drawing_start; j < drawing_end; j++) {
tokenlen += tokens[j].length;
}
tokens[drawing_start].length = tokenlen;
tokens[drawing_start].type = dt::DRAWING_FULL;
tokens.erase(tokens.begin() + drawing_start + 1, tokens.begin() + drawing_end);
last_ovr_end -= drawing_end - drawing_start - 1;
}
tag_p:
if (len != 1 || i + 1 >= tokens.size() || str[pos] != 'p')
break;
@ -199,7 +308,7 @@ void MarkDrawings(std::string const& str, std::vector<DialogueToken> &tokens) {
case dt::KARAOKE_VARIABLE: break;
case dt::LINE_BREAK: break;
default:
tokens[i].type = in_drawing ? dt::DRAWING : dt::TEXT;
tokens[i].type = in_drawing ? dt::DRAWING_FULL : dt::TEXT;
if (i > 0 && tokens[i - 1].type == tokens[i].type) {
tokens[i - 1].length += tokens[i].length;
tokens.erase(tokens.begin() + i);

View File

@ -21,13 +21,148 @@
#include "libaegisub/log.h"
#include "libaegisub/util.h"
namespace {
template<typename Source>
class ConvertFloatToInt16 {
Source* src;
public:
ConvertFloatToInt16(Source* src) :src(src) {}
int16_t operator[](size_t idx) const {
Source expanded = src[idx] * 32768;
return expanded < -32768 ? -32768 :
expanded > 32767 ? 32767 :
static_cast<int16_t>(expanded);
}
};
// 8 bits per sample is assumed to be unsigned with a bias of 128,
// while everything else is assumed to be signed with zero bias
class ConvertIntToInt16 {
void* src;
int bytes_per_sample;
public:
ConvertIntToInt16(void* src, int bytes_per_sample) :src(src), bytes_per_sample(bytes_per_sample) {}
const int16_t& operator[](size_t idx) const {
return *reinterpret_cast<int16_t*>(reinterpret_cast<char*>(src) + (idx + 1) * bytes_per_sample - sizeof(int16_t));
}
};
class ConvertUInt8ToInt16 {
uint8_t* src;
public:
ConvertUInt8ToInt16(uint8_t* src) :src(src) {}
int16_t operator[](size_t idx) const {
return int16_t(src[idx]-128) << 8;
}
};
template<typename Source>
class DownmixToMono {
Source src;
int channels;
public:
DownmixToMono(Source src, int channels) :src(src), channels(channels) {}
int16_t operator[](size_t idx) const {
int ret = 0;
// Just average the channels together
for (int i = 0; i < channels; ++i)
ret += src[idx * channels + i];
return ret / channels;
}
};
}
namespace agi {
void AudioProvider::FillBufferInt16Mono(int16_t* buf, int64_t start, int64_t count) const {
if (!float_samples && bytes_per_sample == 2 && channels == 1) {
FillBuffer(buf, start, count);
return;
}
void* buff = malloc(bytes_per_sample * count * channels);
FillBuffer(buff, start, count);
if (channels == 1) {
if (float_samples) {
if (bytes_per_sample == sizeof(float))
for (int64_t i = 0; i < count; ++i)
buf[i] = ConvertFloatToInt16<float>(reinterpret_cast<float*>(buff))[i];
else if (bytes_per_sample == sizeof(double))
for (int64_t i = 0; i < count; ++i)
buf[i] = ConvertFloatToInt16<double>(reinterpret_cast<double*>(buff))[i];
}
else {
if (bytes_per_sample == sizeof(uint8_t))
for (int64_t i = 0; i < count; ++i)
buf[i] = ConvertUInt8ToInt16(reinterpret_cast<uint8_t*>(buff))[i];
else
for (int64_t i = 0; i < count; ++i)
buf[i] = ConvertIntToInt16(buff, bytes_per_sample)[i];
}
}
else {
if (float_samples) {
if (bytes_per_sample == sizeof(float))
for (int64_t i = 0; i < count; ++i)
buf[i] = DownmixToMono<ConvertFloatToInt16<float> >(ConvertFloatToInt16<float>(reinterpret_cast<float*>(buff)), channels)[i];
else if (bytes_per_sample == sizeof(double))
for (int64_t i = 0; i < count; ++i)
buf[i] = DownmixToMono<ConvertFloatToInt16<double> >(ConvertFloatToInt16<double>(reinterpret_cast<double*>(buff)), channels)[i];
}
else {
if (bytes_per_sample == sizeof(uint8_t))
for (int64_t i = 0; i < count; ++i)
buf[i] = DownmixToMono<ConvertUInt8ToInt16>(ConvertUInt8ToInt16(reinterpret_cast<uint8_t*>(buff)), channels)[i];
else
for (int64_t i = 0; i < count; ++i)
buf[i] = DownmixToMono<ConvertIntToInt16>(ConvertIntToInt16(buff, bytes_per_sample), channels)[i];
}
}
free(buff);
}
// This entire file has turned into a mess. For now I'm just following the pattern of the wangqr code, but
// this should really be restructured entirely again. The original type constructor-based system worked very well - it could
// just give downmix/conversion control to the players instead.
void AudioProvider::GetAudioWithVolume(void *buf, int64_t start, int64_t count, double volume) const {
GetAudio(buf, start, count);
if (volume == 1.0) return;
if (bytes_per_sample != 2)
throw agi::InternalError("GetAudioWithVolume called on unconverted audio stream");
int64_t n = count * GetChannels();
if (float_samples) {
if (bytes_per_sample == sizeof(float)) {
float *buff = reinterpret_cast<float *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = static_cast<float>(buff[i] * volume);
} else if (bytes_per_sample == sizeof(double)) {
double *buff = reinterpret_cast<double *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = buff[i] * volume;
}
}
else {
if (bytes_per_sample == sizeof(uint8_t)) {
uint8_t *buff = reinterpret_cast<uint8_t *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = util::mid(0, static_cast<int>(((int) buff[i] - 128) * volume + 128), 0xFF);
} else if (bytes_per_sample == sizeof(int16_t)) {
int16_t *buff = reinterpret_cast<int16_t *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = util::mid(-0x8000, static_cast<int>(buff[i] * volume), 0x7FFF);
} else if (bytes_per_sample == sizeof(int32_t)) {
int32_t *buff = reinterpret_cast<int32_t *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = static_cast<int32_t>(buff[i] * volume);
} else if (bytes_per_sample == sizeof(int64_t)) {
int64_t *buff = reinterpret_cast<int64_t *>(buf);
for (int64_t i = 0; i < n; ++i)
buff[i] = static_cast<int64_t>(buff[i] * volume);
}
}
}
void AudioProvider::GetInt16MonoAudioWithVolume(int16_t *buf, int64_t start, int64_t count, double volume) const {
GetInt16MonoAudio(buf, start, count);
if (volume == 1.0) return;
auto buffer = static_cast<int16_t *>(buf);
for (size_t i = 0; i < (size_t)count; ++i)
@ -75,6 +210,39 @@ void AudioProvider::GetAudio(void *buf, int64_t start, int64_t count) const {
}
}
void AudioProvider::GetInt16MonoAudio(int16_t* buf, int64_t start, int64_t count) const {
if (start < 0) {
memset(buf, 0, sizeof(int16_t) * std::min(-start, count));
buf -= start;
count += start;
start = 0;
}
if (start + count > num_samples) {
int64_t zero_count = std::min(count, start + count - num_samples);
count -= zero_count;
memset(buf + count, 0, sizeof(int16_t) * zero_count);
}
if (count <= 0) return;
try {
FillBufferInt16Mono(buf, start, count);
}
catch (AudioDecodeError const& e) {
// We don't have any good way to report errors here, so just log the
// failure and return silence
LOG_E("audio_provider") << e.GetMessage();
memset(buf, 0, sizeof(int16_t) * count);
return;
}
catch (...) {
LOG_E("audio_provider") << "Unknown audio decoding error";
memset(buf, 0, sizeof(int16_t) * count);
return;
}
}
namespace {
class writer {
io::Save outfile;
@ -114,7 +282,7 @@ void SaveAudioClip(AudioProvider const& provider, fs::path const& path, int star
out.write("WAVEfmt ");
out.write<int32_t>(16); // Size of chunk
out.write<int16_t>(1); // compression format (PCM)
out.write<int16_t>(provider.AreSamplesFloat() ? 3 : 1); // compression format (1: WAVE_FORMAT_PCM, 3: WAVE_FORMAT_IEEE_FLOAT)
out.write<int16_t>(provider.GetChannels());
out.write<int32_t>(provider.GetSampleRate());
out.write<int32_t>(provider.GetSampleRate() * provider.GetChannels() * provider.GetBytesPerSample());
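For reference, the conversion rules used by `FillBufferInt16Mono` above can be summarized in a few lines of Python (an illustration of the arithmetic only, not part of the codebase): float samples are scaled by 32768 and clamped to the int16 range, unsigned 8-bit samples are recentered around their bias of 128 and shifted up to 16 bits, and multi-channel audio is downmixed by averaging the interleaved channels.

```python
# Illustrative sketch of the sample-conversion arithmetic (not part of Aegisub).
def float_to_s16(x: float) -> int:
    v = int(x * 32768)                 # same scale as ConvertFloatToInt16
    return max(-32768, min(32767, v))  # clamp to the int16 range

def u8_to_s16(x: int) -> int:
    return (x - 128) << 8              # unsigned 8-bit, bias of 128

def downmix_to_mono(samples: list[int], channels: int) -> list[int]:
    # Average each group of interleaved channel samples, like DownmixToMono.
    return [sum(samples[i:i + channels]) // channels
            for i in range(0, len(samples), channels)]
```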

View File

@ -22,119 +22,19 @@
#include <limits>
using namespace agi;
/// Anything integral -> 16 bit signed machine-endian audio converter
namespace {
template<class Target>
class BitdepthConvertAudioProvider final : public AudioProviderWrapper {
int src_bytes_per_sample;
mutable std::vector<uint8_t> src_buf;
class ConvertAudioProvider final : public AudioProviderWrapper {
public:
BitdepthConvertAudioProvider(std::unique_ptr<AudioProvider> src) : AudioProviderWrapper(std::move(src)) {
if (bytes_per_sample > 8)
throw AudioProviderError("Audio format converter: audio with bitdepths greater than 64 bits/sample is currently unsupported");
src_bytes_per_sample = bytes_per_sample;
bytes_per_sample = sizeof(Target);
}
void FillBuffer(void *buf, int64_t start, int64_t count64) const override {
auto count = static_cast<size_t>(count64);
assert(count == count64);
src_buf.resize(count * src_bytes_per_sample * channels);
source->GetAudio(src_buf.data(), start, count);
auto dest = static_cast<int16_t*>(buf);
for (int64_t i = 0; i < count * channels; ++i) {
int64_t sample = 0;
// 8 bits per sample is assumed to be unsigned with a bias of 127,
// while everything else is assumed to be signed with zero bias
if (src_bytes_per_sample == 1)
sample = src_buf[i] - 128;
else {
for (int j = src_bytes_per_sample; j > 0; --j) {
sample <<= 8;
sample += src_buf[i * src_bytes_per_sample + j - 1];
}
}
if (static_cast<size_t>(src_bytes_per_sample) > sizeof(Target))
sample /= 1LL << (src_bytes_per_sample - sizeof(Target)) * 8;
else if (static_cast<size_t>(src_bytes_per_sample) < sizeof(Target))
sample *= 1LL << (sizeof(Target) - src_bytes_per_sample ) * 8;
dest[i] = static_cast<Target>(sample);
}
}
};
/// Floating point -> 16 bit signed machine-endian audio converter
template<class Source, class Target>
class FloatConvertAudioProvider final : public AudioProviderWrapper {
mutable std::vector<Source> src_buf;
public:
FloatConvertAudioProvider(std::unique_ptr<AudioProvider> src) : AudioProviderWrapper(std::move(src)) {
bytes_per_sample = sizeof(Target);
ConvertAudioProvider(std::unique_ptr<AudioProvider> src) : AudioProviderWrapper(std::move(src)) {
float_samples = false;
}
void FillBuffer(void *buf, int64_t start, int64_t count64) const override {
auto count = static_cast<size_t>(count64);
assert(count == count64);
src_buf.resize(count * channels);
source->GetAudio(&src_buf[0], start, count);
auto dest = static_cast<Target*>(buf);
for (size_t i = 0; i < static_cast<size_t>(count * channels); ++i) {
Source expanded;
if (src_buf[i] < 0)
expanded = static_cast<Target>(-src_buf[i] * std::numeric_limits<Target>::min());
else
expanded = static_cast<Target>(src_buf[i] * std::numeric_limits<Target>::max());
dest[i] = expanded < std::numeric_limits<Target>::min() ? std::numeric_limits<Target>::min() :
expanded > std::numeric_limits<Target>::max() ? std::numeric_limits<Target>::max() :
static_cast<Target>(expanded);
}
}
};
/// Non-mono 16-bit signed machine-endian -> mono 16-bit signed machine endian converter
class DownmixAudioProvider final : public AudioProviderWrapper {
int src_channels;
mutable std::vector<int16_t> src_buf;
public:
DownmixAudioProvider(std::unique_ptr<AudioProvider> src) : AudioProviderWrapper(std::move(src)) {
src_channels = channels;
channels = 1;
bytes_per_sample = sizeof(int16_t);
}
void FillBuffer(void *buf, int64_t start, int64_t count64) const override {
auto count = static_cast<size_t>(count64);
assert(count == count64);
src_buf.resize(count * src_channels);
source->GetAudio(&src_buf[0], start, count);
auto dst = static_cast<int16_t*>(buf);
// Just average the channels together
while (count-- > 0) {
int sum = 0;
for (int c = 0; c < src_channels; ++c)
sum += src_buf[count * src_channels + c];
dst[count] = static_cast<int16_t>(sum / src_channels);
}
void FillBuffer(void *buf, int64_t start, int64_t count) const override {
source->GetInt16MonoAudio(reinterpret_cast<int16_t*>(buf), start, count);
}
};
/// Sample doubler with linear interpolation for the samples provider
/// Requires 16-bit mono input
class SampleDoublingAudioProvider final : public AudioProviderWrapper {
@ -177,26 +77,23 @@ std::unique_ptr<AudioProvider> CreateConvertAudioProvider(std::unique_ptr<AudioP
// Ensure 16-bit audio with proper endianness
if (provider->AreSamplesFloat()) {
LOG_D("audio_provider") << "Converting float to S16";
if (provider->GetBytesPerSample() == sizeof(float))
provider = agi::make_unique<FloatConvertAudioProvider<float, int16_t>>(std::move(provider));
else
provider = agi::make_unique<FloatConvertAudioProvider<double, int16_t>>(std::move(provider));
}
if (provider->GetBytesPerSample() != 2) {
LOG_D("audio_provider") << "Converting " << provider->GetBytesPerSample() << " bytes per sample or wrong endian to S16";
provider = agi::make_unique<BitdepthConvertAudioProvider<int16_t>>(std::move(provider));
LOG_D("audio_provider") << "Converting " << provider->GetBytesPerSample() << " bytes per sample to S16";
}
// We currently only support mono audio
if (provider->GetChannels() != 1) {
LOG_D("audio_provider") << "Downmixing to mono from " << provider->GetChannels() << " channels";
provider = agi::make_unique<DownmixAudioProvider>(std::move(provider));
}
// Some players don't like low sample rate audio
while (provider->GetSampleRate() < 32000) {
LOG_D("audio_provider") << "Doubling sample rate";
provider = agi::make_unique<SampleDoublingAudioProvider>(std::move(provider));
if (provider->GetSampleRate() < 32000) {
provider = agi::make_unique<ConvertAudioProvider>(std::move(provider));
while (provider->GetSampleRate() < 32000) {
LOG_D("audio_provider") << "Doubling sample rate";
provider = agi::make_unique<SampleDoublingAudioProvider>(std::move(provider));
}
}
return provider;

View File

@ -38,20 +38,20 @@ class HDAudioProvider final : public AudioProviderWrapper {
void FillBuffer(void *buf, int64_t start, int64_t count) const override {
auto missing = std::min(count, start + count - decoded_samples);
if (missing > 0) {
memset(static_cast<int16_t*>(buf) + count - missing, 0, missing * bytes_per_sample);
memset(static_cast<int16_t*>(buf) + count - missing, 0, missing * bytes_per_sample * channels);
count -= missing;
}
if (count > 0) {
start *= bytes_per_sample;
count *= bytes_per_sample;
start *= bytes_per_sample * channels;
count *= bytes_per_sample * channels;
memcpy(buf, file.read(start, count), count);
}
}
fs::path CacheFilename(fs::path const& dir) {
// Check free space
if ((uint64_t)num_samples * bytes_per_sample > fs::FreeSpace(dir))
if ((uint64_t)num_samples * bytes_per_sample * channels > fs::FreeSpace(dir))
throw AudioProviderError("Not enough free disk space in " + dir.string() + " to cache the audio");
return format("audio-%lld-%lld", time(nullptr),
@ -61,7 +61,7 @@ class HDAudioProvider final : public AudioProviderWrapper {
public:
HDAudioProvider(std::unique_ptr<AudioProvider> src, agi::fs::path const& dir)
: AudioProviderWrapper(std::move(src))
, file(dir / CacheFilename(dir), num_samples * bytes_per_sample)
, file(dir / CacheFilename(dir), num_samples * bytes_per_sample * channels)
{
decoded_samples = 0;
decoder = std::thread([&] {
@ -69,7 +69,7 @@ public:
for (int64_t i = 0; i < num_samples; i += block) {
if (cancelled) break;
block = std::min(block, num_samples - i);
source->GetAudio(file.write(i * bytes_per_sample, block * bytes_per_sample), i, block);
source->GetAudio(file.write(i * bytes_per_sample * channels, block * bytes_per_sample * channels), i, block);
decoded_samples += block;
}
});

View File

@ -29,6 +29,11 @@ class LockAudioProvider final : public agi::AudioProviderWrapper {
source->GetAudio(buf, start, count);
}
void FillBufferInt16Mono(int16_t *buf, int64_t start, int64_t count) const override {
std::unique_lock<std::mutex> lock(mutex);
source->GetInt16MonoAudio(buf, start, count);
}
public:
LockAudioProvider(std::unique_ptr<AudioProvider> src)
: AudioProviderWrapper(std::move(src))

View File

@ -46,14 +46,14 @@ public:
decoded_samples = 0;
try {
blockcache.resize((source->GetNumSamples() * source->GetBytesPerSample() + CacheBlockSize - 1) >> CacheBits);
blockcache.resize((num_samples * bytes_per_sample * channels + CacheBlockSize - 1) >> CacheBits);
}
catch (std::bad_alloc const&) {
throw AudioProviderError("Not enough memory available to cache in RAM");
}
decoder = std::thread([&] {
int64_t readsize = CacheBlockSize / source->GetBytesPerSample();
int64_t readsize = CacheBlockSize / bytes_per_sample / channels;
for (size_t i = 0; i < blockcache.size(); i++) {
if (cancelled) break;
auto actual_read = std::min<int64_t>(readsize, num_samples - i * readsize);
@ -71,20 +71,22 @@ public:
void RAMAudioProvider::FillBuffer(void *buf, int64_t start, int64_t count) const {
auto charbuf = static_cast<char *>(buf);
for (int64_t bytes_remaining = count * bytes_per_sample; bytes_remaining; ) {
for (int64_t bytes_remaining = count * bytes_per_sample * channels; bytes_remaining; ) {
if (start >= decoded_samples) {
memset(charbuf, 0, bytes_remaining);
break;
}
const int i = (start * bytes_per_sample) >> CacheBits;
const int start_offset = (start * bytes_per_sample) & (CacheBlockSize-1);
const int read_size = std::min<int>(bytes_remaining, CacheBlockSize - start_offset);
const int64_t samples_per_block = CacheBlockSize / bytes_per_sample / channels;
const size_t i = start / samples_per_block;
const int start_offset = (start % samples_per_block) * bytes_per_sample * channels;
const int read_size = std::min<int>(bytes_remaining, samples_per_block * bytes_per_sample * channels - start_offset);
memcpy(charbuf, &blockcache[i][start_offset], read_size);
charbuf += read_size;
bytes_remaining -= read_size;
start += read_size / bytes_per_sample;
start += read_size / bytes_per_sample / channels;
}
}
}
@ -93,4 +95,4 @@ namespace agi {
std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> src) {
return agi::make_unique<RAMAudioProvider>(std::move(src));
}
}
}

View File

@ -143,7 +143,7 @@ Framerate::Framerate(double fps)
Framerate::Framerate(int64_t numerator, int64_t denominator, bool drop)
: denominator(denominator)
, numerator(numerator)
, drop(drop && numerator % denominator != 0)
, drop(drop && denominator != 0 && numerator % denominator != 0)
{
if (numerator <= 0 || denominator <= 0)
throw InvalidFramerate("Numerator and denominator must both be greater than zero");
@ -225,7 +225,7 @@ int Framerate::FrameAtTime(int ms, Time type) const {
return int((ms * numerator / denominator - 999) / 1000);
if (ms > timecodes.back())
return int((ms * numerator - last + denominator - 1) / denominator / 1000) + (int)timecodes.size() - 1;
return int((ms * numerator - numerator / 2 - last + numerator - 1) / denominator / 1000) + (int)timecodes.size() - 1;
return (int)distance(lower_bound(timecodes.rbegin(), timecodes.rend(), ms, std::greater<int>()), timecodes.rend()) - 1;
}

View File

@ -39,7 +39,12 @@ namespace agi {
ERROR,
COMMENT,
WHITESPACE,
DRAWING,
DRAWING_FULL,
DRAWING_CMD,
DRAWING_X,
DRAWING_Y,
DRAWING_ENDPOINT_X,
DRAWING_ENDPOINT_Y,
KARAOKE_TEMPLATE,
KARAOKE_VARIABLE
};
@ -49,7 +54,11 @@ namespace agi {
enum {
NORMAL = 0,
COMMENT,
DRAWING,
DRAWING_CMD,
DRAWING_X,
DRAWING_Y,
DRAWING_ENDPOINT_X,
DRAWING_ENDPOINT_Y,
OVERRIDE,
PUNCTUATION,
TAG,

View File

@ -20,8 +20,8 @@
#include <libaegisub/fs_fwd.h>
#include <atomic>
#include <memory>
#include <vector>
#include <memory>
namespace agi {
class AudioProvider {
@ -37,6 +37,7 @@ protected:
bool float_samples = false;
virtual void FillBuffer(void *buf, int64_t start, int64_t count) const = 0;
virtual void FillBufferInt16Mono(int16_t* buf, int64_t start, int64_t count) const;
void ZeroFill(void *buf, int64_t count) const;
@ -45,6 +46,8 @@ public:
void GetAudio(void *buf, int64_t start, int64_t count) const;
void GetAudioWithVolume(void *buf, int64_t start, int64_t count, double volume) const;
void GetInt16MonoAudio(int16_t* buf, int64_t start, int64_t count) const;
void GetInt16MonoAudioWithVolume(int16_t *buf, int64_t start, int64_t count, double volume) const;
int64_t GetNumSamples() const { return num_samples; }
int64_t GetDecodedSamples() const { return decoded_samples; }
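The new GetInt16MonoAudio and GetInt16MonoAudioWithVolume entry points let audio players that can only deal with 16-bit mono (several backends later in this commit) ask the provider to do the downmix and format conversion for them. A minimal caller sketch, with the include path assumed:

#include <cstdint>
#include <libaegisub/audio/provider.h> // assumed include path for this header

// Fill a device buffer that only accepts 16-bit mono samples.
void fill_mono16_block(agi::AudioProvider const& provider, int16_t* dst,
                       int64_t start_sample, int64_t count, double volume) {
    // Downmixing and sample-format conversion happen inside the provider.
    provider.GetInt16MonoAudioWithVolume(dst, start_sample, count, volume);
}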

View File

@ -46,10 +46,15 @@ namespace agi {
/// @brief Log a message
///
/// If any messages are logged then the dialog will not automatically close
/// when the task finishes so that the user has the chance to read them.
/// If any messages are logged and StayOpen is set (set by default)
/// then the dialog will not automatically close when the task finishes
/// so that the user has the chance to read them.
virtual void Log(std::string const& str)=0;
/// Set whether the dialog should stay open after the task finishes.
/// Defaults to true.
virtual void SetStayOpen(bool stayopen)=0;
/// Has the user asked the task to cancel?
virtual bool IsCancelled()=0;
};
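SetStayOpen turns the old hard-coded behaviour into a choice: logging used to force the dialog to stay open, whereas now a task can emit purely informational messages and still let the dialog close on its own. A minimal sketch, assuming this interface is agi::ProgressSink and using a made-up message:

#include <libaegisub/progress.h> // assumed header path

void run_task(agi::ProgressSink* ps) {
    ps->SetStayOpen(false);          // informational logs only; close when the task finishes
    ps->Log("Cached 120 keyframes"); // hypothetical message
    if (ps->IsCancelled())
        return;                      // honour a user cancel request
}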

View File

@ -24,6 +24,8 @@
#include <memory>
#include <string>
#pragma once
#undef CreateDirectory
namespace agi {

View File

@ -129,7 +129,6 @@ struct LuaStackcheck {
void dump();
LuaStackcheck(lua_State *L) : L(L), startstack(lua_gettop(L)) { }
~LuaStackcheck() { check_stack(0); }
};
#else
struct LuaStackcheck {

View File

@ -84,6 +84,12 @@ public:
Color const& GetColor() const;
bool const& GetBool() const;
std::string const& GetDefaultString() const;
int64_t const& GetDefaultInt() const;
double const& GetDefaultDouble() const;
Color const& GetDefaultColor() const;
bool const& GetDefaultBool() const;
void SetString(const std::string);
void SetInt(const int64_t);
void SetDouble(const double);
@ -96,6 +102,12 @@ public:
std::vector<Color> const& GetListColor() const;
std::vector<bool> const& GetListBool() const;
std::vector<std::string> const& GetDefaultListString() const;
std::vector<int64_t> const& GetDefaultListInt() const;
std::vector<double> const& GetDefaultListDouble() const;
std::vector<Color> const& GetDefaultListColor() const;
std::vector<bool> const& GetDefaultListBool() const;
void SetListString(std::vector<std::string>);
void SetListInt(std::vector<int64_t>);
void SetListDouble(std::vector<double>);
@ -117,6 +129,7 @@ public:
: OptionValue(std::move(member_name)) \
, value(member_value), value_default(member_value) { } \
type const& GetValue() const { return value; } \
type const& GetDefault() const { return value_default; } \
void SetValue(type new_val) { value = std::move(new_val); NotifyChanged(); } \
OptionType GetType() const { return OptionType::type_name; } \
void Reset() { value = value_default; NotifyChanged(); } \
@ -141,6 +154,7 @@ CONFIG_OPTIONVALUE(Bool, bool)
: OptionValue(std::move(name)) \
, array(value), array_default(value) { } \
std::vector<type> const& GetValue() const { return array; } \
std::vector<type> const& GetDefault() const { return array_default; } \
void SetValue(std::vector<type> val) { array = std::move(val); NotifyChanged(); } \
OptionType GetType() const { return OptionType::List##type_name; } \
void Reset() { array = array_default; NotifyChanged(); } \
@ -156,6 +170,7 @@ CONFIG_OPTIONVALUE_LIST(Bool, bool)
#define CONFIG_OPTIONVALUE_ACCESSORS(ReturnType, Type) \
inline ReturnType const& OptionValue::Get##Type() const { return As<OptionValue##Type>(OptionType::Type)->GetValue(); } \
inline ReturnType const& OptionValue::GetDefault##Type() const { return As<OptionValue##Type>(OptionType::Type)->GetDefault(); } \
inline void OptionValue::Set##Type(ReturnType v) { As<OptionValue##Type>(OptionType::Type)->SetValue(std::move(v)); }
CONFIG_OPTIONVALUE_ACCESSORS(std::string, String)
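The new GetDefault* accessors expose an option's built-in default next to its current value, which is what a preferences dialog needs in order to mark settings the user has changed. A minimal sketch using an option name that appears elsewhere in this commit:

#include "options.h" // assumed include providing OPT_GET

// true if the user changed the spectrum quality away from its default
bool spectrum_quality_modified() {
    auto const* opt = OPT_GET("Audio/Renderer/Spectrum/Quality");
    return opt->GetInt() != opt->GetDefaultInt();
}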

View File

@ -75,14 +75,13 @@ void Path::FillPlatformSpecificPaths() {
SetToken("?local", home/".aegisub");
#ifdef APPIMAGE_BUILD
agi::fs::path data = exe_dir();
if (data == "") data = home/".aegisub";
SetToken("?data", data);
SetToken("?dictionary", Decode("?data/dictionaries"));
agi::fs::path exe = exe_dir();
agi::fs::path data_from_bin = agi::fs::path(P_DATA).lexically_relative(P_BIN);
SetToken("?data", (exe != "" ? exe/data_from_bin : home/".aegisub").make_preferred());
#else
SetToken("?data", P_DATA);
SetToken("?dictionary", "/usr/share/hunspell");
#endif
SetToken("?dictionary", "/usr/share/hunspell");
#else
agi::fs::path app_support = agi::util::GetApplicationSupportDirectory();
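The AppImage branch above stops pointing ?data at the configured install prefix and instead recomputes it from wherever the binary is actually running, so the relocated AppImage mount point is handled automatically. A worked example of the path arithmetic, with illustrative values for P_BIN and P_DATA (they are set by the meson configuration later in this commit):

// P_BIN  = "/usr/bin", P_DATA = "/usr/share/aegisub"          (illustrative)
// exe_dir() at runtime: "/tmp/.mount_Aegisub/usr/bin"         (illustrative AppImage mount)
agi::fs::path exe = exe_dir();
agi::fs::path data_from_bin = agi::fs::path(P_DATA).lexically_relative(P_BIN); // "../share/aegisub"
agi::fs::path data = (exe / data_from_bin).make_preferred();   // ".../usr/share/aegisub"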

View File

@ -35,7 +35,11 @@ namespace bfs = boost::filesystem;
namespace agi { namespace fs {
std::string ShortName(path const& p) {
std::wstring out(MAX_PATH + 1, 0);
DWORD length = GetShortPathName(p.c_str(), NULL, 0);
if (!length)
return p.string();
std::wstring out(length, 0);
DWORD len = GetShortPathName(p.c_str(), &out[0], out.size());
if (!len)
return p.string();
@ -60,9 +64,7 @@ void Touch(path const& file) {
}
void Copy(fs::path const& from, fs::path const& to) {
acs::CheckFileRead(from);
CreateDirectory(to.parent_path());
acs::CheckDirWrite(to.parent_path());
if (!CopyFile(from.wstring().c_str(), to.wstring().c_str(), false)) {
switch (GetLastError()) {

View File

@ -1,13 +1,13 @@
project('Aegisub', ['c', 'cpp'],
license: 'BSD-3-Clause',
meson_version: '>=0.57.0',
meson_version: '>=0.61.0',
default_options: ['cpp_std=c++14', 'buildtype=debugoptimized'],
version: '3.2.2')
cmake = import('cmake')
if host_machine.system() == 'windows'
add_project_arguments('-DNOMINMAX', '-D_WIN32_WINNT=0x0601', language: 'cpp')
add_project_arguments('-DNOMINMAX', language: 'cpp')
if not get_option('csri').disabled()
add_global_arguments('-DCSRI_NO_EXPORT', language: 'c')
@ -52,12 +52,20 @@ if get_option('debug') and host_machine.system() != 'windows'
add_project_arguments('-D_DEBUG', language: 'cpp')
endif
if get_option('buildtype') == 'release'
add_project_arguments('-DNDEBUG', language: 'cpp')
endif
conf = configuration_data()
conf.set_quoted('P_BIN', bindir)
conf.set_quoted('P_DATA', dataroot)
conf.set_quoted('P_LOCALE', localedir)
if get_option('credit') != ''
conf.set_quoted('BUILD_CREDIT', get_option('credit'))
endif
if get_option('build_appimage')
conf.set('APPIMAGE_BUILD', 1)
endif
conf.set('WITH_UPDATE_CHECKER', get_option('enable_update_checker'))
deps = []
@ -78,10 +86,14 @@ cc = meson.get_compiler('c')
deps += cc.find_library('m', required: false)
deps += cc.find_library('dl', required: false)
iconv_dep = cc.find_library('iconv', required: false)
if not (iconv_dep.found() or cc.has_function('iconv_open'))
iconv_sp = subproject('iconv') # this really needs to be replaced with a proper port
iconv_dep = iconv_sp.get_variable('libiconv_dep')
if meson.version().version_compare('>=0.60.0')
iconv_dep = dependency('iconv', fallback: ['iconv', 'libiconv_dep'])
else
iconv_dep = cc.find_library('iconv', required: false)
if not (iconv_dep.found() or cc.has_function('iconv_open'))
iconv_sp = subproject('iconv') # this really needs to be replaced with a proper port
iconv_dep = iconv_sp.get_variable('libiconv_dep')
endif
endif
deps += iconv_dep
@ -90,7 +102,7 @@ deps += dependency('libass', version: '>=0.9.7',
boost_modules = ['chrono', 'filesystem', 'thread', 'locale', 'regex']
if not get_option('local_boost')
boost_dep = dependency('boost', version: '>=1.50.0',
boost_dep = dependency('boost', version: '>=1.60.0',
modules: boost_modules + ['system'],
required: false,
static: get_option('default_library') == 'static')
@ -220,24 +232,86 @@ foreach dep: [
endif
endforeach
if host_machine.system() == 'windows' and get_option('avisynth').enabled()
conf.set('WITH_AVISYNTH', 1) # bundled separately with installer
needs_ffmpeg = false
if get_option('bestsource').enabled()
conf.set('WITH_BESTSOURCE', 1)
bs = subproject('bestsource')
deps += bs.get_variable('bestsource_dep')
dep_avail += 'BestSource'
needs_ffmpeg = true
endif
if host_machine.system() == 'windows' and not get_option('directsound').disabled()
dsound_dep = cc.find_library('dsound', required: get_option('directsound'))
winmm_dep = cc.find_library('winmm', required: get_option('directsound'))
ole32_dep = cc.find_library('ole32', required: get_option('directsound'))
have_dsound_h = cc.has_header('dsound.h')
if not have_dsound_h and get_option('directsound').enabled()
error('DirectSound enabled but dsound.h not found')
if needs_ffmpeg
conf.set('WITH_FFMPEG', 1)
deps += [
dependency('libavutil', default_options: ['tests=disabled']),
dependency('libswscale', default_options: ['tests=disabled']),
]
endif
if get_option('avisynth').enabled()
conf.set('WITH_AVISYNTH', 1) # bundled separately with installer
dep_avail += 'AviSynth'
avs_opt = cmake.subproject_options()
avs_opt.add_cmake_defines({
'HEADERS_ONLY': true
})
avs = cmake.subproject('avisynth', options: avs_opt)
deps_inc += avs.include_directories('AviSynth-Headers')
if host_machine.system() == 'windows'
deps += cc.find_library('avifil32', required: true)
endif
endif
if get_option('vapoursynth').enabled()
conf.set('WITH_VAPOURSYNTH', 1)
vs_sub = subproject('vapoursynth')
deps_inc += vs_sub.get_variable('vs_inc')
dep_avail += 'VapourSynth'
endif
if host_machine.system() == 'windows'
if not get_option('directsound').disabled()
dsound_dep = cc.find_library('dsound', required: get_option('directsound'))
winmm_dep = cc.find_library('winmm', required: get_option('directsound'))
ole32_dep = cc.find_library('ole32', required: get_option('directsound'))
have_dsound_h = cc.has_header('dsound.h')
if not have_dsound_h and get_option('directsound').enabled()
error('DirectSound enabled but dsound.h not found')
endif
dxguid_dep = cc.find_library('dxguid', required: true)
if dsound_dep.found() and winmm_dep.found() and ole32_dep.found() and dxguid_dep.found() and have_dsound_h
deps += [dsound_dep, winmm_dep, ole32_dep, dxguid_dep]
conf.set('WITH_DIRECTSOUND', 1)
dep_avail += 'DirectSound'
endif
endif
dxguid_dep = cc.find_library('dxguid', required: true)
if dsound_dep.found() and winmm_dep.found() and ole32_dep.found() and dxguid_dep.found() and have_dsound_h
deps += [dsound_dep, winmm_dep, ole32_dep, dxguid_dep]
conf.set('WITH_DIRECTSOUND', 1)
dep_avail += 'DirectSound'
if not get_option('xaudio2').disabled()
have_xaudio_h = cc.has_header('xaudio2.h')
xaudio2_dep = cc.find_library('xaudio2', required: true)
if have_xaudio_h and xaudio2_dep.found()
deps += [xaudio2_dep]
conf.set('WITH_XAUDIO2', 1)
dep_avail += 'XAudio2'
# XAudio2 needs Windows 8 or newer, so target Windows 8 here; defining an older Windows version can break the build.
add_project_arguments('-D_WIN32_WINNT=0x0602', language: 'cpp')
else
# Windows 8 is not required if XAudio2 is not found; revert to Windows 7 for compatibility.
add_project_arguments('-D_WIN32_WINNT=0x0601', language: 'cpp')
endif
if not have_xaudio_h and get_option('xaudio2').enabled()
error('xaudio2 enabled but xaudio2.h not found')
endif
else
# Windows 8 is not required if XAudio2 is disabled; revert to Windows 7 for compatibility.
add_project_arguments('-D_WIN32_WINNT=0x0601', language: 'cpp')
endif
endif
@ -337,8 +411,18 @@ subdir('tests')
aegisub_cpp_pch = ['src/include/agi_pre.h']
aegisub_c_pch = ['src/include/agi_pre_c.h']
link_args = []
link_depends = []
if host_machine.system() == 'windows'
manifest_file = configure_file(copy: true, input: 'src/res/aegisub.exe.manifest', output: 'aegisub.exe.manifest')
link_args += ['/MANIFEST:EMBED', '/MANIFESTINPUT:@0@'.format(manifest_file)]
link_depends += manifest_file
endif
aegisub = executable('aegisub', aegisub_src, version_h, acconf,
link_with: [libresrc, libluabins, libaegisub],
link_args: link_args,
link_depends: link_depends,
include_directories: [libaegisub_inc, libresrc_inc, version_inc, deps_inc, include_directories('src')],
cpp_pch: aegisub_cpp_pch,
c_pch: aegisub_c_pch,
@ -346,13 +430,3 @@ aegisub = executable('aegisub', aegisub_src, version_h, acconf,
install_dir: bindir,
dependencies: deps,
win_subsystem: 'windows')
if host_machine.system() == 'windows'
mt_exe = find_program('mt.exe')
apply_manifest = find_program(meson.project_source_root() / 'tools/apply-manifest.py')
custom_target('apply-manifest',
input: aegisub,
output: 'applied_manifest',
command: [apply_manifest, mt_exe, '@INPUT@'],
build_by_default: true)
endif

View File

@ -3,10 +3,13 @@ option('openal', type: 'feature', description: 'OpenAL audio output')
option('libpulse', type: 'feature', description: 'PulseAudio audio output')
option('portaudio', type: 'feature', description: 'PortAudio audio output')
option('directsound', type: 'feature', description: 'DirectSound audio output')
option('default_audio_output', type: 'combo', choices: ['auto', 'ALSA', 'OpenAL', 'PulseAudio', 'PortAudio', 'DirectSound'], description: 'Default audio output')
option('xaudio2', type: 'feature', description: 'XAudio2 audio output')
option('default_audio_output', type: 'combo', choices: ['auto', 'ALSA', 'OpenAL', 'PulseAudio', 'PortAudio', 'DirectSound', 'XAudio2'], description: 'Default audio output')
option('ffms2', type: 'feature', description: 'FFMS2 video source')
option('avisynth', type: 'feature', description: 'AviSynth video source')
option('bestsource', type: 'feature', description: 'BestSource video source')
option('vapoursynth', type: 'feature', description: 'VapourSynth video source')
option('fftw3', type: 'feature', description: 'FFTW3 support')
option('hunspell', type: 'feature', description: 'Hunspell spell checker')
@ -25,3 +28,4 @@ option('update_server', type: 'string', value: 'updates.aegisub.org', descriptio
option('update_url', type: 'string', value: '/trunk', description: 'Base path to use for the update checker')
option('build_osx_bundle', type: 'boolean', value: 'false', description: 'Package Aegisub.app on OSX')
option('build_appimage', type: 'boolean', value: 'false', description: 'Prepare for AppImage packaging')

View File

@ -0,0 +1,61 @@
<?xml version="1.0" encoding="UTF-8"?>
<component type="desktop">
<id>aegisub.desktop</id>
<metadata_license>CC0-1.0</metadata_license>
<project_license>BSD-3-Clause AND MIT AND MPL-1.1</project_license>
<name>Aegisub</name>
<summary>A free, cross-platform open source tool for creating and modifying subtitles</summary>
<description>
<p>Aegisub is a free, cross-platform open source tool for creating and modifying subtitles. Aegisub makes it quick and easy to time subtitles to audio, and features many powerful tools for styling them, including a built-in real-time video preview.</p>
<p>Aegisub was originally created as a tool to make typesetting, particularly in anime fansubs, a less painful experience. At the time of the start of the project, many other programs that supported the Advanced Substation Alpha format lacked (and in many cases, still lack; development on several competing programs has since been dropped for various reasons completely unrelated to Aegisub) many vital functions, or were too buggy and/or unreliable to be really useful.</p>
<p>Since then, Aegisub has grown into a fully fledged, highly customizable subtitle editor. It features a lot of convenient tools to help you with timing, typesetting, editing and translating subtitles, as well as a powerful scripting environment called Automation (originally mostly intended for creating karaoke effects, Automation can now be used for much else, including creating macros and various other convenient tools).</p>
<p>Some highlights of Aegisub:</p>
<ul>
<li>Simple and intuitive yet powerful interface for editing subtitles</li>
<li>Support for many formats and character sets</li>
<li>Powerful video mode</li>
<li>Visual typesetting tools</li>
<li>Intuitive and customizable audio timing mode</li>
<li>Fully scriptable through the Automation module</li>
</ul>
</description>
<!-- XXX: appstreamcli validation warning: cid-desktopapp-is-not-rdns
If improving this, the <id> and filename should probably also be changed. -->
<launchable type="desktop-id">aegisub.desktop</launchable>
<kudos>
<kudo>HiDpiIcon</kudo>
<kudo>HighContrast</kudo>
<kudo>UserDocs</kudo>
</kudos>
<screenshots>
<screenshot type="default">
<caption>Typesetting</caption>
<image>https://aegisub.org/img/screenshots/unix/typesetting.png</image>
</screenshot>
<screenshot>
<caption>Audio video</caption>
<image>https://aegisub.org/img/screenshots/unix/audio-video.png</image>
</screenshot>
<screenshot>
<caption>Audio timing</caption>
<image>https://aegisub.org/img/screenshots/unix/audio-timing.png</image>
</screenshot>
</screenshots>
<developer_name>Aegisub Group</developer_name>
<url type="bugtracker">https://github.com/Aegisub/Aegisub/issues</url>
<url type="faq">https://aegisub.org/docs/latest/faq</url>
<url type="help">https://aegisub.org/docs/latest</url>
<url type="homepage">https://aegisub.org</url>
<url type="translate">https://github.com/Aegisub/Aegisub</url>
<content_rating type="oars-1.0">
<content_attribute id="social-info">mild</content_attribute>
</content_rating>
<translation type="gettext">aegisub</translation>
<provides>
<binary>aegisub</binary>
</provides>
<releases>
<!-- TODO: automatic replace at config time -->
<release version="3.2.2" date="2014-12-08"/>
</releases>
</component>

View File

@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<component type="desktop">
<id>aegisub.desktop</id>
<metadata_license>CC0-1.0</metadata_license>
<project_license>BSD-3-Clause AND MIT AND MPL-1.1</project_license>
<_name>Aegisub</_name>
<_summary>A free, cross-platform open source tool for creating and modifying subtitles</_summary>
<description>
<_p>Aegisub is a free, cross-platform open source tool for creating and modifying subtitles. Aegisub makes it quick and easy to time subtitles to audio, and features many powerful tools for styling them, including a built-in real-time video preview.</_p>
<_p>Aegisub was originally created as a tool to make typesetting, particularly in anime fansubs, a less painful experience. At the time of the start of the project, many other programs that supported the Advanced Substation Alpha format lacked (and in many cases, still lack; development on several competing programs have since been dropped for various reasons completely unrelated to Aegisub) many vital functions, or were too buggy and/or unreliable to be really useful.</_p>
<_p>Since then, Aegisub has grown into a fully fledged, highly customizable subtitle editor. It features a lot of convenient tools to help you with timing, typesetting, editing and translating subtitles, as well as a powerful scripting environment called Automation (originally mostly intended for creating karaoke effects, Automation can now be used much else, including creating macros and various other convenient tools).</_p>
<_p>Some highlights of Aegisub:</_p>
<ul>
<_li>Simple and intuitive yet powerful interface for editing subtitles</_li>
<_li>Support for many formats and character sets</_li>
<_li>Powerful video mode</_li>
<_li>Visual typesetting tools</_li>
<_li>Intuitive and customizable audio timing mode</_li>
<_li>Fully scriptable through the Automation module</_li>
</ul>
</description>
<launchable type="desktop-id">aegisub.desktop</launchable>
<kudos>
<kudo>HiDpiIcon</kudo>
<kudo>HighContrast</kudo>
<kudo>UserDocs</kudo>
</kudos>
<screenshots>
<screenshot type="default">
<_caption>Typesetting</_caption>
<image>http://static.aegisub.org/img/screenshots/unix/typesetting-efc51b7a.png</image>
</screenshot>
<screenshot>
<_caption>Audio video</_caption>
<image>http://static.aegisub.org/img/screenshots/unix/audio-video-f1f81fc2.png</image>
</screenshot>
<screenshot>
<_caption>Audio timing</_caption>
<image>http://static.aegisub.org/img/screenshots/unix/audio-timing-1d8fce7e.png</image>
</screenshot>
</screenshots>
<developer_name>Aegisub Group</developer_name>
<url type="bugtracker">https://github.com/Aegisub/Aegisub/issues</url>
<url type="faq">http://docs.aegisub.org/manual/FAQ</url>
<url type="help">http://docs.aegisub.org</url>
<url type="homepage">http://www.aegisub.org</url>
<url type="translate">https://sites.google.com/site/rockytdrontransifex/aegisub</url>
<content_rating type="oars-1.0">
<content_attribute id="social-info">mild</content_attribute>
</content_rating>
<translation type="gettext">aegisub</translation>
<provides>
<binary>aegisub</binary>
</provides>
<releases>
<release version="3.2.2" date="2014-12-08"/>
</releases>
</component>

View File

@ -9,7 +9,7 @@ TryExec=@AEGISUB_COMMAND@
Icon=aegisub
Terminal=false
Categories=AudioVideo;AudioVideoEditing;GTK;
_Keywords=subtitles;subtitle;captions;captioning;video;audio;
Keywords=subtitles;subtitle;captions;captioning;video;audio;
MimeType=application/x-srt;text/plain;text/x-ass;text/x-microdvd;text/x-ssa;
StartupNotify=true
StartupWMClass=aegisub

View File

@ -18,8 +18,8 @@ elif host_machine.system() == 'darwin'
else
conf_pkg.set('AEGISUB_COMMAND', 'aegisub')
desktop_template = configure_file(input: 'desktop/aegisub.desktop.template.in',
output: 'aegisub.desktop.template',
desktop_template = configure_file(input: 'desktop/aegisub.desktop.in.in',
output: 'aegisub.desktop.in',
configuration: conf_pkg)
i18n = import('i18n')
@ -30,6 +30,16 @@ else
install: true,
install_dir: datadir / 'applications')
appdata_template = configure_file(input: 'desktop/aegisub.appdata.xml.in.in',
output: 'aegisub.desktop.appdata.xml.in',
configuration: conf_pkg)
i18n.merge_file(input: appdata_template,
output: 'aegisub.appdata.xml',
type: 'xml',
po_dir: '../po',
install: true,
install_dir: datadir / 'metainfo')
aegisub_logos = ['16x16.png', '22x22.png', '24x24.png', '32x32.png', '48x48.png', '64x64.png', 'scalable.svg']
foreach s: aegisub_logos
@ -38,4 +48,10 @@ else
install_data('desktop' / dir / 'aegisub.' + ext,
install_dir: datadir / 'icons' / 'hicolor' / dir / 'apps')
endforeach
if get_option('build_appimage')
install_symlink('AppRun', install_dir: '/', pointing_to: bindir.strip('/') / 'aegisub')
install_symlink('.DirIcon', install_dir: '/', pointing_to: datadir.strip('/') / 'icons' / 'hicolor' / 'scalable' / 'apps' / 'aegisub.svg')
install_symlink('aegisub.desktop', install_dir: '/', pointing_to: datadir.strip('/') / 'applications' / 'aegisub.desktop')
endif
endif

View File

@ -29,6 +29,7 @@ Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes";
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".wav"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".ogg"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".avs"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".vpy"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".opus"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".h264"; ValueData: ""; Flags: uninsdeletekey
Root: HKLM; Subkey: "SOFTWARE\Classes\Applications\aegisub.exe\SupportedTypes"; ValueType: string; ValueName: ".hevc"; ValueData: ""; Flags: uninsdeletekey
@ -165,6 +166,7 @@ Root: HKLM; Subkey: "SOFTWARE\Classes\.m4a\OpenWithProgids"; ValueType: string;
Root: HKLM; Subkey: "SOFTWARE\Classes\.wav\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Audio.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.ogg\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Media.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.avs\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Video.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.vpy\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Media.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.opus\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Audio.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.h264\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Video.1"; Flags: uninsdeletevalue
Root: HKLM; Subkey: "SOFTWARE\Classes\.hevc\OpenWithProgids"; ValueType: string; ValueName: "Aegisub.Video.1"; Flags: uninsdeletevalue

View File

@ -30,6 +30,8 @@ DestDir: {app}\automation\include; Source: {#SOURCE_ROOT}\automation\include\uni
DestDir: {app}\automation\include; Source: {#SOURCE_ROOT}\automation\include\utils.lua; Flags: ignoreversion overwritereadonly uninsremovereadonly; Attribs: readonly; Components: main
DestDir: {app}\automation\include; Source: {#SOURCE_ROOT}\automation\include\utils-auto4.lua; Flags: ignoreversion overwritereadonly uninsremovereadonly; Attribs: readonly; Components: main
DestDir: {app}\automation\vapoursynth; Source: {#SOURCE_ROOT}\automation\vapoursynth\aegisub_vs.py; Flags: ignoreversion overwritereadonly uninsremovereadonly; Attribs: readonly; Components: main
#ifdef DEPCTRL
; DepCtrl
DestDir: {userappdata}\Aegisub\automation\include\l0; Source: {#DEPS_DIR}\DependencyControl\modules\*; Flags: ignoreversion recursesubdirs createallsubdirs; Components: macros\modules\depctrl

View File

@ -5,3 +5,8 @@ DestDir: {app}; Source: {#DEPS_DIR}\AvisynthPlus64\x64\AviSynth.dll; Flags: igno
DestDir: {app}; Source: {#DEPS_DIR}\AvisynthPlus64\x64\plugins\DirectShowSource.dll; Flags: ignoreversion; Components: main
; VSFilter
DestDir: {app}\csri; Source: {#DEPS_DIR}\VSFilter\x64\VSFilter.dll; Flags: ignoreversion; Components: main
; VapourSynth
DestDir: {app}\vapoursynth; Source: {#DEPS_DIR}\L-SMASH-Works\libvslsmashsource.dll; Flags: ignoreversion; Components: vapoursynth
DestDir: {app}\vapoursynth; Source: {#DEPS_DIR}\bestaudiosource\win64\BestAudioSource.dll; Flags: ignoreversion; Components: vapoursynth
DestDir: {app}\vapoursynth; Source: {#DEPS_DIR}\SCXVid\libscxvid.dll; Flags: ignoreversion; Components: vapoursynth
DestDir: {app}\vapoursynth; Source: {#DEPS_DIR}\WWXD\libwwxd64.dll; Flags: ignoreversion; Components: vapoursynth

View File

@ -1,5 +1,6 @@
[Components]
Name: "main"; Description: "Main Files"; Types: full compact custom; Flags: fixed
Name: "vapoursynth"; Description: "Bundled VapourSynth Plugins"; Types: full
Name: "macros"; Description: "Automation Scripts"; Types: full
Name: "macros\bundled"; Description: "Bundled macros"; Types: full
Name: "macros\demos"; Description: "Example macros/Demos"; Types: full

View File

@ -2,6 +2,7 @@
[Files]
DestDir: {tmp}; Source: "{#DEPS_DIR}\VC_redist\VC_redist.x{#ARCH}.exe"; Flags: nocompression deleteafterinstall
DestDir: {app}; Source: "{#DEPS_DIR}\XAudio2_redist\build\native\release\bin\x{#ARCH}\xaudio2_9redist.dll"; DestName: "XAudio2_9.dll"; OnlyBelowVersion: 10.0
[Run]
Filename: {tmp}\VC_redist.x{#ARCH}.exe; StatusMsg: {cm:InstallRuntime}; Parameters: "/install /quiet /norestart"

View File

@ -1,5 +1,5 @@
; This file declares all installables related to spell checking and thesauri in Aegisub
[Files]
Source: {#DEPS_DIR}\dictionaries\en_US.aff; DestDir: {app}\dictionaries; Flags: skipifsourcedoesntexist ignoreversion
Source: {#DEPS_DIR}\dictionaries\en_US.dic; DestDir: {app}\dictionaries; Flags: skipifsourcedoesntexist ignoreversion
Source: {#DEPS_DIR}\dictionaries\en_US.aff; DestDir: {app}\dictionaries; Flags: ignoreversion; Components: dictionaries/en_US
Source: {#DEPS_DIR}\dictionaries\en_US.dic; DestDir: {app}\dictionaries; Flags: ignoreversion; Components: dictionaries/en_US

View File

@ -51,15 +51,26 @@ Copy-New-Item $InstallerDir\bin\aegisub.exe $PortableOutputDir
Write-Output 'Copying - translations'
Copy-New-Items "$InstallerDir\share\locale\*" "$PortableOutputDir\locale" -Recurse
Write-Output 'Copying - dictionaries'
Copy-New-Item $InstallerDepsDir\dictionaries\en_US.aff $PortableOutputDir\dictionaries
Copy-New-Item $InstallerDepsDir\dictionaries\en_US.dic $PortableOutputDir\dictionaries
Write-Output 'Copying - codecs'
Write-Output 'Copying - codecs\Avisynth'
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x86-64\DevIL.dll $PortableOutputDir
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x86-64\AviSynth.dll $PortableOutputDir
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x86-64\plugins\DirectShowSource.dll $PortableOutputDir
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x64\system\DevIL.dll $PortableOutputDir
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x64\AviSynth.dll $PortableOutputDir
Copy-New-Item $InstallerDepsDir\AvisynthPlus64\x64\plugins\DirectShowSource.dll $PortableOutputDir
Write-Output 'Copying - codecs\VapourSynth'
Copy-New-Item $InstallerDepsDir\L-SMASH-Works\libvslsmashsource.dll $PortableOutputDir\vapoursynth
Copy-New-Item $InstallerDepsDir\bestaudiosource\win64\BestAudioSource.dll $PortableOutputDir\vapoursynth
Copy-New-Item $InstallerDepsDir\SCXVid\libscxvid.dll $PortableOutputDir\vapoursynth
Copy-New-Item $InstallerDepsDir\WWXD\libwwxd64.dll $PortableOutputDir\vapoursynth
Write-Output 'Copying - codecs\VSFilter'
Copy-New-Item $InstallerDepsDir\VSFilter\x64\VSFilter.dll $PortableOutputDir\csri
Write-Output 'Copying - runtimes\MS-CRT'
Copy-New-Item $InstallerDepsDir\VC_redist\VC_redist.x64.exe $PortableOutputDir\Microsoft.CRT
Write-Output 'Copying - redist\XAudio2_9'
Copy-New-Item $InstallerDepsDir\XAudio2_redist\build\native\release\bin\x64\xaudio2_9redist.dll $PortableOutputDir\Redist
Rename-Item $PortableOutputDir\Redist\xaudio2_9redist.dll $PortableOutputDir\Redist\XAudio2_9.dll
Write-Output 'Copying - automation'
Copy-New-Items "$InstallerDir\share\aegisub\automation\*" "$PortableOutputDir\automation\" -Recurse

View File

@ -1,3 +1,6 @@
packages/desktop/aegisub.desktop.in.in
packages/desktop/aegisub.appdata.xml.in.in
src/ass_style.cpp
src/audio_box.cpp
src/audio_karaoke.cpp

Diffs suppressed because they are too large:

(one file, name not shown in this rendering)
po/ar.po (11645 changed lines)
po/bg.po (11304 changed lines)
po/ca.po (16069 changed lines)
po/cs.po (13293 changed lines)
po/da.po (11945 changed lines)
po/de.po (11022 changed lines)
po/el.po (12737 changed lines)
po/es.po (12952 changed lines)
po/eu.po (14450 changed lines)
po/fa.po (11933 changed lines)
po/fi.po (12490 changed lines)
po/fr_FR.po (11410 changed lines)
po/gl.po (11617 changed lines)
po/hu.po (14772 changed lines)
po/id.po (14412 changed lines)
po/it.po (13622 changed lines)
po/ja.po (12035 changed lines)
po/ko.po (13556 changed lines)

View File

@ -1,19 +1,22 @@
#!/bin/sh
set -e
maybe_append() {
while read -r msg; do
msgfile=$(echo $msg | cut -d'|' -f1)
msgline=$(echo $msg | cut -d'|' -f2)
msgid=$(echo $msg | cut -d'|' -f3-)
msgfile=$(printf '%s' "$msg" | cut -d'|' -f1)
msgline=$(printf '%s' "$msg" | cut -d'|' -f2)
msgid=$(printf '%s' "$msg" | cut -d'|' -f3-)
if ! grep -Fq "msgid $msgid" aegisub.pot; then
echo "\n#: $msgfile:$msgline\nmsgid $msgid\nmsgstr \"\"\n" >> aegisub.pot
printf "\n#: %s:%s\nmsgid %s\nmsgstr \"\"\n\n" \
"$msgfile" "$msgline" "$msgid" >> aegisub.pot
fi
done
}
find ../src ../src/command -name \*.cpp -o -name \*.h \
| xgettext --files-from=- -o - --c++ -k_ -kSTR_MENU -kSTR_DISP -kSTR_HELP -kfmt_tl -kfmt_plural:2,3 -kwxT -kwxPLURAL:1,2 \
find ../src ../src/command -name '*.cpp' -o -name '*.h' \
| xgettext --files-from=- -o - --c++ --sort-by-file \
-k_ -kSTR_MENU -kSTR_DISP -kSTR_HELP -kfmt_tl -kfmt_plural:2,3 -kwxT -kwxPLURAL:1,2 \
| sed 's/SOME DESCRIPTIVE TITLE./Aegisub 3.2/' \
| sed 's/YEAR/2005-2014/' \
| sed "s/THE PACKAGE'S COPYRIGHT HOLDER/Rodrigo Braz Monteiro, Niels Martin Hansen, Thomas Goyne et. al./" \
@ -33,28 +36,27 @@ grep '"[A-Za-z ]\+" : {' -n ../src/libresrc/default_hotkey.json \
| sed 's/^\([0-9]\+:\).*\("[^"]\+"\).*$/default_hotkey.json|\1|\2/' \
| maybe_append
find ../automation -name *.lua \
| xargs grep tr\"[^\"]\*\" -o -n \
find ../automation -name '*.lua' \
| LC_ALL=C sort \
| xargs grep 'tr"[^"]*"' -o -n \
| sed 's/\(.*\):\([0-9]\+\):tr\(".*"\)/\1|\2|\3/' \
| sed 's/\\/\\\\\\\\/g' \
| maybe_append
for i in 'Name' 'GenericName' 'Comment' 'Keywords'
do
grep ^_$i -n ../packages/desktop/aegisub.desktop.template.in \
| sed 's/\([0-9]\+\):[^=]\+=\(.*\)$/aegisub.desktop|\1|"\2"/' \
| maybe_append
done
xgettext ../packages/desktop/aegisub.desktop.in.in \
--language=Desktop --join-existing --omit-header -o aegisub.pot
if which xmlstarlet >/dev/null 2>&1 && which jq >/dev/null 2>&1; then
for i in 'name' 'summary' 'p' 'li' 'caption'; do
xmlstarlet sel -t -v "//_$i" ../packages/desktop/aegisub.appdata.xml.template.in | jq -R .
done | nl -v0 -w1 -s'|' | sed -re 's/^/aegisub.appdata.xml|/' | maybe_append
fi
xgettext ../packages/desktop/aegisub.appdata.xml.in.in \
--language=AppData --join-existing --omit-header -o aegisub.pot
grep '^_[A-Za-z0-9]*=.*' ../packages/win_installer/fragment_strings.iss.in | while read line
do
echo "$line" \
printf '%s\n' "$line" \
| sed 's/[^=]*=\(.*\)/packages\/win_installer\/fragment_strings.iss|1|"\1"/' \
| maybe_append
done
for lang in $(cat LINGUAS) ; do
# If using gettext < 0.21, run twice to avoid reversing order of old strings
# ref: https://savannah.gnu.org/bugs/?58778
msgmerge --update --backup=none --no-fuzzy-matching --sort-by-file "$lang".po aegisub.pot
done

View File

@ -1,4 +1,14 @@
i18n = import('i18n')
# This is currently broken on OSX
# and incomplete on every platform:
# it misses translatable strings that do not appear directly in the
# C++ sources, the desktop file or the appdata file. This affects strings
# from the Windows installer (.iss), from Lua scripts and from JSON files.
# Until a solution is found, POT updates should continue to use make_pot.sh.
i18n.gettext('aegisub',
args: [
'-k_', '-kSTR_MENU', '-kSTR_DISP', '-kSTR_HELP', '-kwxT',
'-kfmt_tl', '-kfmt_plural:2,3', '-kwxPLURAL:1,2',
'--sort-by-file'
],
install_dir: localedir)

Diffs suppressed because they are too large:

po/nl.po (11676 changed lines)
po/pl.po (12134 changed lines)
po/pt_BR.po (12432 changed lines)
po/pt_PT.po (13277 changed lines)
po/ru.po (11614 changed lines)
po/sr_RS.po (13315 changed lines)
(one further catalog, name not shown in this rendering)
po/uk_UA.po (11441 changed lines)
po/vi.po (15673 changed lines)
po/zh_CN.po (12396 changed lines)
po/zh_TW.po (12389 changed lines)

View File

@ -26,9 +26,11 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Aegisub Project http://www.aegisub.org/
#pragma once
#include "ass_entry.h"
#include "ass_override.h"
#include "fold_controller.h"
#include <libaegisub/ass/time.h>
@ -124,6 +126,9 @@ struct AssDialogueBase {
int Row = -1;
/// Data describing line folds starting or ending at this line
FoldInfo Fold;
/// Is this a comment line?
bool Comment = false;
/// Layer number

View File

@ -175,6 +175,8 @@ int AssFile::Commit(wxString const& desc, int type, int amend_id, AssDialogue *s
event.Row = i++;
}
AnnouncePreCommit(type, single_line);
PushState({desc, &amend_id, single_line});
AnnounceCommit(type, single_line);
@ -200,6 +202,12 @@ bool AssFile::CompEffect(AssDialogue const& lft, AssDialogue const& rgt) {
bool AssFile::CompLayer(AssDialogue const& lft, AssDialogue const& rgt) {
return lft.Layer < rgt.Layer;
}
bool AssFile::CompText(AssDialogue const& lft, AssDialogue const& rgt) {
return lft.Text < rgt.Text;
}
bool AssFile::CompTextStripped(AssDialogue const& lft, AssDialogue const& rgt) {
return lft.GetStrippedText() < rgt.GetStrippedText();
}
void AssFile::Sort(CompFunc comp, std::set<AssDialogue*> const& limit) {
Sort(Events, comp, limit);
@ -234,10 +242,43 @@ uint32_t AssFile::AddExtradata(std::string const& key, std::string const& value)
return data.id;
}
}
Extradata.push_back(ExtradataEntry{next_extradata_id, key, value});
Extradata.push_back(ExtradataEntry{next_extradata_id, 0, key, value});
return next_extradata_id++; // return old value, then post-increment
}
void AssFile::SetExtradataValue(AssDialogue& line, std::string const& key, std::string const& value, bool del) {
std::vector<uint32_t> id_list = line.ExtradataIds;
std::vector<bool> to_erase(id_list.size());
bool dirty = false;
bool found = false;
std::vector<ExtradataEntry> entry_list = GetExtradata(id_list);
for (int i = entry_list.size() - 1; i >= 0; i--) {
if (entry_list[i].key == key) {
if (!del && entry_list[i].value == value) {
found = true;
} else {
to_erase[i] = true;
dirty = true;
}
}
}
// The key is already set to this value; nothing needs to change
if (found && !dirty)
return;
for (int i = id_list.size() - 1; i >= 0; i--) {
if (to_erase[i])
id_list.erase(id_list.begin() + i, id_list.begin() + i + 1);
}
if (!del && !found)
id_list.push_back(AddExtradata(key, value));
line.ExtradataIds = id_list;
}
namespace {
struct extradata_id_cmp {
bool operator()(ExtradataEntry const& e, uint32_t id) {
@ -299,10 +340,16 @@ void AssFile::CleanExtradata() {
}
}
for (ExtradataEntry &e : Extradata) {
if (ids_used.count(e.id))
e.expiration_counter = 0;
else
e.expiration_counter++;
}
if (ids_used.size() != Extradata.size()) {
// Erase all no-longer-used extradata entries
Extradata.erase(std::remove_if(begin(Extradata), end(Extradata), [&](ExtradataEntry const& e) {
return !ids_used.count(e.id);
return e.expiration_counter >= 10;
}), end(Extradata));
}
}
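SetExtradataValue and DeleteExtradataValue (declared in the header below) wrap the bookkeeping above: setting a key first drops any other values stored under that key on the line, and CleanExtradata now only erases entries after they have stayed unreferenced for ten consecutive passes instead of immediately. A minimal usage sketch; the key and value are hypothetical, not taken from this commit:

#include "ass_dialogue.h" // assumed header for AssDialogue
#include "ass_file.h"

void remember_choice(AssFile& file, AssDialogue& line, bool forget) {
    if (forget)
        file.DeleteExtradataValue(line, "example-key");    // hypothetical key
    else
        file.SetExtradataValue(line, "example-key", "42"); // replaces older values for this key
}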

View File

@ -27,6 +27,8 @@
//
// Aegisub Project http://www.aegisub.org/
#pragma once
#include "ass_entry.h"
#include <libaegisub/fs_fwd.h>
@ -48,6 +50,7 @@ using EntryList = typename boost::intrusive::make_list<T, boost::intrusive::cons
struct ExtradataEntry {
uint32_t id;
int expiration_counter;
std::string key;
std::string value;
};
@ -81,7 +84,10 @@ struct ProjectProperties {
class AssFile {
/// A set of changes has been committed to the file (AssFile::COMMITType)
agi::signal::Signal<int, const AssDialogue*> AnnounceCommit;
agi::signal::Signal<int, const AssDialogue*> AnnouncePreCommit;
agi::signal::Signal<AssFileCommit> PushState;
void SetExtradataValue(AssDialogue& line, std::string const& key, std::string const& value, bool del);
public:
/// The lines in the file
std::vector<AssInfo> Info;
@ -133,6 +139,10 @@ public:
uint32_t AddExtradata(std::string const& key, std::string const& value);
/// Fetch all extradata entries from a list of IDs
std::vector<ExtradataEntry> GetExtradata(std::vector<uint32_t> const& id_list) const;
/// Set an extradata key:value pair for a dialogue line, clearing previous values for this key if necessary
void SetExtradataValue(AssDialogue& line, std::string const& key, std::string const& value) { SetExtradataValue(line, key, value, false); };
/// Delete any extradata values for the given key
void DeleteExtradataValue(AssDialogue& line, std::string const& key) { SetExtradataValue(line, key, "", true); };
/// Remove unreferenced extradata entries
void CleanExtradata();
@ -166,8 +176,11 @@ public:
COMMIT_DIAG_FULL = COMMIT_DIAG_META | COMMIT_DIAG_TIME | COMMIT_DIAG_TEXT,
/// Extradata entries were added/modified/removed
COMMIT_EXTRADATA = 0x100,
/// Folds were added or removed
COMMIT_FOLD = COMMIT_EXTRADATA,
};
DEFINE_SIGNAL_ADDERS(AnnouncePreCommit, AddPreCommitListener)
DEFINE_SIGNAL_ADDERS(AnnounceCommit, AddCommitListener)
DEFINE_SIGNAL_ADDERS(PushState, AddUndoManager)
@ -194,6 +207,10 @@ public:
static bool CompEffect(AssDialogue const& lft, AssDialogue const& rgt);
/// Compare based on layer
static bool CompLayer(AssDialogue const& lft, AssDialogue const& rgt);
/// Compare based on text
static bool CompText(AssDialogue const& lft, AssDialogue const& rgt);
/// Compare based on stripped text
static bool CompTextStripped(AssDialogue const& lft, AssDialogue const& rgt);
/// @brief Sort the dialogue lines in this file
/// @param comp Comparison function to use. Defaults to sorting by start time.

View File

@ -219,7 +219,7 @@ void AssParser::ParseExtradataLine(std::string const &data) {
// ensure next_extradata_id is always at least 1 more than the largest existing id
target->next_extradata_id = std::max(id+1, target->next_extradata_id);
target->Extradata.push_back(ExtradataEntry{id, std::move(key), std::move(value)});
target->Extradata.push_back(ExtradataEntry{id, 0, std::move(key), std::move(value)});
}
}

View File

@ -25,6 +25,12 @@
#include <libaegisub/dispatch.h>
#if BOOST_VERSION >= 106900
#include <boost/gil.hpp>
#else
#include <boost/gil/gil_all.hpp>
#endif
enum {
NEW_SUBS_FILE = -1,
SUBS_FILE_ALREADY_LOADED = -2
@ -81,6 +87,55 @@ std::shared_ptr<VideoFrame> AsyncVideoProvider::ProcFrame(int frame_number, doub
return frame;
}
VideoFrame AsyncVideoProvider::GetBlankFrame(bool white) {
VideoFrame result;
result.width = GetWidth();
result.height = GetHeight();
result.pitch = result.width * 4;
result.flipped = false;
result.data.resize(result.pitch * result.height, white ? 255 : 0);
return result;
}
VideoFrame AsyncVideoProvider::GetSubtitles(double time) {
// We want to combine all transparent subtitle layers onto one layer.
// Instead of alpha blending them all together, which can be messy and cause
// rounding errors, we draw them once on a black frame and once on a white frame,
// and solve for the color and alpha. This has the benefit of being independent
// of the subtitle provider, as long as the provider works by alpha blending.
VideoFrame frame_black = GetBlankFrame(false);
if (!subs) return frame_black;
VideoFrame frame_white = GetBlankFrame(true);
subs_provider->LoadSubtitles(subs.get());
subs_provider->DrawSubtitles(frame_black, time / 1000.);
subs_provider->DrawSubtitles(frame_white, time / 1000.);
using namespace boost::gil;
auto blackview = interleaved_view(frame_black.width, frame_black.height, (bgra8_pixel_t*) frame_black.data.data(), frame_black.width * 4);
auto whiteview = interleaved_view(frame_white.width, frame_white.height, (bgra8_pixel_t*) frame_white.data.data(), frame_white.width * 4);
transform_pixels(blackview, whiteview, blackview, [=](const bgra8_pixel_t black, const bgra8_pixel_t white) -> bgra8_pixel_t {
int a = 255 - (white[0] - black[0]);
bgra8_pixel_t ret;
if (a == 0) {
ret[0] = 0;
ret[1] = 0;
ret[2] = 0;
ret[3] = 0;
} else {
ret[0] = black[0] / (a / 255.);
ret[1] = black[1] / (a / 255.);
ret[2] = black[2] / (a / 255.);
ret[3] = a;
}
return ret;
});
return frame_black;
}
static std::unique_ptr<SubtitlesProvider> get_subs_provider(wxEvtHandler *evt_handler, agi::BackgroundRunner *br) {
try {
return SubtitlesProviderFactory::GetProvider(br);
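The black/white compositing trick in GetSubtitles above can be written out per channel. With subtitle colour c and alpha a (both in 0..255) alpha-blended over a background b, the renderer produces b' = (a/255)c + (1 - a/255)b; evaluating this on the black and the white frame and subtracting recovers both unknowns, which is exactly what the transform_pixels lambda computes:

B = \frac{a}{255}\,c, \qquad W = \frac{a}{255}\,c + \Bigl(1 - \frac{a}{255}\Bigr)\cdot 255
\implies W - B = 255 - a, \qquad a = 255 - (W - B), \qquad c = \frac{255\,B}{a} \quad (a \neq 0)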

View File

@ -78,6 +78,9 @@ class AsyncVideoProvider {
std::vector<std::shared_ptr<VideoFrame>> buffers;
// Returns a monochromatic frame with the current dimensions
VideoFrame GetBlankFrame(bool white);
public:
/// @brief Load the passed subtitle file
/// @param subs File to load
@ -108,6 +111,15 @@ public:
/// @brief raw Get raw frame without subtitles
std::shared_ptr<VideoFrame> GetFrame(int frame, double time, bool raw = false);
/// @brief Synchronously get the subtitles with transparent background
/// @param time Exact start time of the frame in milliseconds
///
/// This function is not used for drawing the subtitles on the screen, and it is not
/// guaranteed that drawing the resulting image onto the current raw frame exactly
/// reproduces the current rendered frame with subtitles. This function is for
/// purposes like copying the current subtitles to the clipboard.
VideoFrame GetSubtitles(double time);
/// Ask the video provider to change YCbCr matricies
void SetColorSpace(std::string const& matrix);

View File

@ -129,7 +129,7 @@ END_EVENT_TABLE()
void AudioBox::OnMouseWheel(wxMouseEvent &evt) {
if (!ForwardMouseWheelEvent(audioDisplay, evt))
return;
bool zoom = evt.CmdDown() != OPT_GET("Audio/Wheel Default to Zoom")->GetBool();
bool zoom = evt.CmdDown() != OPT_GET("Audio/Wheel Default to Zoom")->GetBool() || evt.ShiftDown();
if (!zoom) {
int amount = -evt.GetWheelRotation();
// If the user did a horizontal scroll the amount should be inverted
@ -145,7 +145,11 @@ void AudioBox::OnMouseWheel(wxMouseEvent &evt) {
mouse_zoom_accum += evt.GetWheelRotation();
int zoom_delta = mouse_zoom_accum / evt.GetWheelDelta();
mouse_zoom_accum %= evt.GetWheelDelta();
SetHorizontalZoom(audioDisplay->GetZoomLevel() + zoom_delta);
if (evt.ShiftDown()) {
SetVerticalZoom(OPT_GET("Audio/Zoom/Vertical")->GetInt() + 3 * zoom_delta);
} else {
SetHorizontalZoom(audioDisplay->GetZoomLevel() + zoom_delta);
}
}
}
@ -179,10 +183,15 @@ void AudioBox::SetHorizontalZoom(int new_zoom) {
}
void AudioBox::OnVerticalZoom(wxScrollEvent &event) {
int pos = mid(1, event.GetPosition(), 100);
SetVerticalZoom(event.GetPosition());
}
void AudioBox::SetVerticalZoom(int new_zoom) {
int pos = mid(1, new_zoom, 100);
OPT_SET("Audio/Zoom/Vertical")->SetInt(pos);
double value = pow(pos / 50.0, 3);
audioDisplay->SetAmplitudeScale(value);
VerticalZoom->SetValue(pos);
if (!VolumeBar->IsEnabled()) {
VolumeBar->SetValue(pos);
controller->SetVolume(value);

View File

@ -73,6 +73,7 @@ class AudioBox final : public wxSashWindow {
int mouse_zoom_accum = 0;
void SetHorizontalZoom(int new_zoom);
void SetVerticalZoom(int new_zoom);
void OnAudioOpen();
void OnHorizontalZoom(wxScrollEvent &event);
void OnMouseWheel(wxMouseEvent &evt);

View File

@ -760,6 +760,15 @@ void AudioDisplay::ReloadRenderingSettings()
spectrum_width[spectrum_quality],
spectrum_distance[spectrum_quality]);
// Frequency curve
int64_t spectrum_freq_curve = OPT_GET("Audio/Renderer/Spectrum/FreqCurve")->GetInt();
spectrum_freq_curve = mid<int64_t>(0, spectrum_freq_curve, 4);
const float spectrum_fref_pos [] = { 0.001f, 0.125f, 0.333f, 0.425f, 0.999f };
audio_spectrum_renderer->set_reference_frequency_position (
spectrum_fref_pos [spectrum_freq_curve]
);
audio_renderer_provider = std::move(audio_spectrum_renderer);
}
else
@ -1229,6 +1238,7 @@ void AudioDisplay::OnAudioOpen(agi::AudioProvider *provider)
OPT_SUB("Colour/Audio Display/Spectrum", &AudioDisplay::ReloadRenderingSettings, this),
OPT_SUB("Colour/Audio Display/Waveform", &AudioDisplay::ReloadRenderingSettings, this),
OPT_SUB("Audio/Renderer/Spectrum/Quality", &AudioDisplay::ReloadRenderingSettings, this),
OPT_SUB("Audio/Renderer/Spectrum/FreqCurve", &AudioDisplay::ReloadRenderingSettings, this),
});
OnTimingController();
}

View File

@ -43,6 +43,7 @@
std::unique_ptr<AudioPlayer> CreateAlsaPlayer(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreateDirectSoundPlayer(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreateDirectSound2Player(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreateXAudio2Player(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreateOpenALPlayer(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreatePortAudioPlayer(agi::AudioProvider *providers, wxWindow *window);
std::unique_ptr<AudioPlayer> CreatePulseAudioPlayer(agi::AudioProvider *providers, wxWindow *window);
@ -63,6 +64,9 @@ namespace {
{"DirectSound-old", CreateDirectSoundPlayer, false},
{"DirectSound", CreateDirectSound2Player, false},
#endif
#ifdef WITH_XAUDIO2
{"Xaudio2", CreateXAudio2Player, false},
#endif
#ifdef WITH_OPENAL
{"OpenAL", CreateOpenALPlayer, false},
#endif

View File

@ -79,6 +79,7 @@ class AlsaPlayer final : public AudioPlayer {
std::atomic<double> volume{1.0};
int64_t start_position = 0;
std::atomic<int64_t> end_position{0};
bool fallback_mono16 = false; // whether to convert to 16 bit mono. FIXME: more flexible conversion
std::mutex position_mutex;
int64_t last_position = 0;
@ -88,6 +89,8 @@ class AlsaPlayer final : public AudioPlayer {
std::thread thread;
snd_pcm_format_t GetPCMFormat(const agi::AudioProvider *provider);
void PlaybackThread();
void UpdatePlaybackPosition(snd_pcm_t *pcm, int64_t position)
@ -115,6 +118,36 @@ public:
void SetEndPosition(int64_t pos) override;
};
snd_pcm_format_t AlsaPlayer::GetPCMFormat(const agi::AudioProvider *provider) {
if (provider->AreSamplesFloat()) {
switch (provider->GetBytesPerSample()) {
case 4:
return SND_PCM_FORMAT_FLOAT_LE;
case 8:
return SND_PCM_FORMAT_FLOAT64_LE;
default:
fallback_mono16 = true;
return SND_PCM_FORMAT_S16_LE;
}
} else {
switch (provider->GetBytesPerSample()) {
case 1:
return SND_PCM_FORMAT_U8;
case 2:
return SND_PCM_FORMAT_S16_LE;
case 3:
return SND_PCM_FORMAT_S24_LE;
case 4:
return SND_PCM_FORMAT_S32_LE;
case 8:
return SND_PCM_FORMAT_S32_LE;
default:
fallback_mono16 = true;
return SND_PCM_FORMAT_S16_LE;
}
}
}
void AlsaPlayer::PlaybackThread()
{
std::unique_lock<std::mutex> lock(mutex);
@ -126,24 +159,11 @@ void AlsaPlayer::PlaybackThread()
BOOST_SCOPE_EXIT_ALL(&) { snd_pcm_close(pcm); };
do_setup:
snd_pcm_format_t pcm_format;
switch (provider->GetBytesPerSample())
{
case 1:
LOG_D("audio/player/alsa") << "format U8";
pcm_format = SND_PCM_FORMAT_U8;
break;
case 2:
LOG_D("audio/player/alsa") << "format S16_LE";
pcm_format = SND_PCM_FORMAT_S16_LE;
break;
default:
return;
}
snd_pcm_format_t pcm_format = GetPCMFormat(provider);
if (snd_pcm_set_params(pcm,
pcm_format,
SND_PCM_ACCESS_RW_INTERLEAVED,
provider->GetChannels(),
fallback_mono16 ? 1 : provider->GetChannels(),
provider->GetSampleRate(),
1, // allow resample
100*1000 // 100 milliseconds latency
@ -151,7 +171,7 @@ do_setup:
return;
LOG_D("audio/player/alsa") << "set pcm params";
size_t framesize = provider->GetChannels() * provider->GetBytesPerSample();
size_t framesize = fallback_mono16 ? sizeof(int16_t) : provider->GetChannels() * provider->GetBytesPerSample();
while (true)
{
@ -175,7 +195,11 @@ do_setup:
{
auto avail = std::min(snd_pcm_avail(pcm), (snd_pcm_sframes_t)(end_position-position));
decode_buffer.resize(avail * framesize);
provider->GetAudioWithVolume(decode_buffer.data(), position, avail, volume);
if (fallback_mono16) {
provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(decode_buffer.data()), position, avail, volume);
} else {
provider->GetAudioWithVolume(decode_buffer.data(), position, avail, volume);
}
snd_pcm_sframes_t written = 0;
while (written <= 0)
@ -235,7 +259,11 @@ do_setup:
{
decode_buffer.resize(avail * framesize);
provider->GetAudioWithVolume(decode_buffer.data(), position, avail, volume);
if (fallback_mono16) {
provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(decode_buffer.data()), position, avail, volume);
} else {
provider->GetAudioWithVolume(decode_buffer.data(), position, avail, volume);
}
snd_pcm_sframes_t written = 0;
while (written <= 0)
{

View File

@ -45,6 +45,7 @@
#include <mmsystem.h>
#include <dsound.h>
#include <cguid.h>
namespace {
class DirectSoundPlayer;
@ -111,8 +112,10 @@ DirectSoundPlayer::DirectSoundPlayer(agi::AudioProvider *provider, wxWindow *par
WAVEFORMATEX waveFormat;
waveFormat.wFormatTag = WAVE_FORMAT_PCM;
waveFormat.nSamplesPerSec = provider->GetSampleRate();
waveFormat.nChannels = provider->GetChannels();
waveFormat.wBitsPerSample = provider->GetBytesPerSample() * 8;
//waveFormat.nChannels = provider->GetChannels();
//waveFormat.wBitsPerSample = provider->GetBytesPerSample() * 8;
waveFormat.nChannels = 1;
waveFormat.wBitsPerSample = sizeof(int16_t) * 8;
waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
waveFormat.cbSize = sizeof(waveFormat);
@ -160,7 +163,7 @@ bool DirectSoundPlayer::FillBuffer(bool fill) {
HRESULT res;
void *ptr1, *ptr2;
unsigned long int size1, size2;
int bytesps = provider->GetBytesPerSample();
int bytesps = /*provider->GetBytesPerSample()*/ sizeof(int16_t);
// To write length
int toWrite = 0;
@ -223,8 +226,8 @@ RetryLock:
LOG_D_IF(!count1 && !count2, "audio/player/dsound1") << "DS fill: nothing";
// Get source wave
if (count1) provider->GetAudioWithVolume(ptr1, playPos, count1, volume);
if (count2) provider->GetAudioWithVolume(ptr2, playPos+count1, count2, volume);
if (count1) provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(ptr1), playPos, count1, volume);
if (count2) provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(ptr2), playPos+count1, count2, volume);
playPos += count1+count2;
buffer->Unlock(ptr1,count1*bytesps,ptr2,count2*bytesps);
@ -254,7 +257,7 @@ void DirectSoundPlayer::Play(int64_t start,int64_t count) {
FillBuffer(true);
DWORD play_flag = 0;
if (count*provider->GetBytesPerSample() > bufSize) {
if (count*/*provider->GetBytesPerSample()*/sizeof(int16_t) > bufSize) {
// Start thread
thread = new DirectSoundPlayerThread(this);
thread->Create();
@ -371,4 +374,4 @@ std::unique_ptr<AudioPlayer> CreateDirectSoundPlayer(agi::AudioProvider *provide
return agi::make_unique<DirectSoundPlayer>(provider, parent);
}
#endif // WITH_DIRECTSOUND
#endif // WITH_DIRECTSOUND

View File

@ -317,13 +317,14 @@ void DirectSoundPlayer2Thread::Run()
// Describe the wave format
WAVEFORMATEX waveFormat;
waveFormat.wFormatTag = WAVE_FORMAT_PCM;
waveFormat.nSamplesPerSec = provider->GetSampleRate();
waveFormat.cbSize = 0;
waveFormat.wFormatTag = provider->AreSamplesFloat() ? 3 /* WAVE_FORMAT_IEEE_FLOAT */ : WAVE_FORMAT_PCM;
waveFormat.nChannels = provider->GetChannels();
waveFormat.wBitsPerSample = provider->GetBytesPerSample() * 8;
waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
waveFormat.cbSize = sizeof(waveFormat);
//waveFormat.cbSize = sizeof(waveFormat);
// And the buffer itself
int aim = waveFormat.nAvgBytesPerSec * (wanted_latency*buffer_length)/1000;
@ -332,7 +333,7 @@ void DirectSoundPlayer2Thread::Run()
DWORD bufSize = mid(min,aim,max); // size of entire playback buffer
DSBUFFERDESC desc;
desc.dwSize = sizeof(DSBUFFERDESC);
desc.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS;
desc.dwFlags = DSBCAPS_CTRLVOLUME | DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS;
desc.dwBufferBytes = bufSize;
desc.dwReserved = 0;
desc.lpwfxFormat = &waveFormat;
@ -461,6 +462,15 @@ stop_playback:
goto do_fill_buffer;
case WAIT_OBJECT_0+3:
{
LONG invert_volume = (LONG)((this->volume - 1.0) * 5000.0); // map linear volume onto DirectSound's attenuation scale (hundredths of a dB)
// Clamp manually to the range the buffer accepts; DSBVOLUME_MIN / 2 serves as the effective floor here.
if (invert_volume > DSBVOLUME_MAX)
invert_volume = DSBVOLUME_MAX;
else if (invert_volume < DSBVOLUME_MIN / 2)
invert_volume = DSBVOLUME_MIN / 2;
bfr->SetVolume(invert_volume);
}
// Change volume
// We aren't thread safe right now, filling the buffers grabs volume directly
// from the field set by the controlling thread, but it shouldn't be a major
@ -608,7 +618,7 @@ DWORD DirectSoundPlayer2Thread::FillAndUnlockBuffers(void *buf1, DWORD buf1sz, v
buf2sz = 0;
}
provider->GetAudioWithVolume(buf1, input_frame, buf1szf, volume);
provider->GetAudio(buf1, input_frame, buf1szf);
input_frame += buf1szf;
}
@ -621,7 +631,7 @@ DWORD DirectSoundPlayer2Thread::FillAndUnlockBuffers(void *buf1, DWORD buf1sz, v
buf2sz = buf2szf * bytes_per_frame;
}
provider->GetAudioWithVolume(buf2, input_frame, buf2szf, volume);
provider->GetAudio(buf2, input_frame, buf2szf);
input_frame += buf2szf;
}
@ -932,4 +942,4 @@ std::unique_ptr<AudioPlayer> CreateDirectSound2Player(agi::AudioProvider *provid
return agi::make_unique<DirectSoundPlayer2>(provider, parent);
}
#endif // WITH_DIRECTSOUND
#endif // WITH_DIRECTSOUND

View File

@ -71,6 +71,8 @@ class OpenALPlayer final : public AudioPlayer, wxTimer {
float volume = 1.f; ///< Current audio volume
ALsizei samplerate; ///< Sample rate of the audio
int bpf; ///< Bytes per frame
bool fallback_mono16 = false; ///< whether to fall back to int16 mono. FIXME: More flexible conversion
int format; ///< AL format (stereo/mono, 8/16 bit)
int64_t start_frame = 0; ///< First frame of playback
int64_t cur_frame = 0; ///< Next frame to write to playback buffers
@ -125,8 +127,39 @@ public:
OpenALPlayer::OpenALPlayer(agi::AudioProvider *provider)
: AudioPlayer(provider)
, samplerate(provider->GetSampleRate())
, bpf(provider->GetChannels() * provider->GetBytesPerSample())
{
switch (provider->GetChannels()) {
case 1:
switch (provider->GetBytesPerSample()) {
case 1:
format = AL_FORMAT_MONO8;
break;
case 2:
format = AL_FORMAT_MONO16;
break;
default:
format = AL_FORMAT_MONO16;
fallback_mono16 = true;
}
break;
case 2:
switch (provider->GetBytesPerSample()) {
case 1:
format = AL_FORMAT_STEREO8;
break;
case 2:
format = AL_FORMAT_STEREO16;
break;
default:
format = AL_FORMAT_MONO16;
fallback_mono16 = true;
}
break;
default:
format = AL_FORMAT_MONO16;
fallback_mono16 = true;
}
bpf = fallback_mono16 ? sizeof(int16_t) : provider->GetChannels() * provider->GetBytesPerSample();
device = alcOpenDevice(nullptr);
if (!device) throw AudioPlayerOpenError("Failed opening default OpenAL device");
@ -239,16 +272,21 @@ void OpenALPlayer::FillBuffers(ALsizei count)
for (count = mid(1, count, buffers_free); count > 0; --count) {
ALsizei fill_len = mid<ALsizei>(0, decode_buffer.size() / bpf, end_frame - cur_frame);
if (fill_len > 0)
if (fill_len > 0) {
// Get fill_len frames of audio
provider->GetAudioWithVolume(&decode_buffer[0], cur_frame, fill_len, volume);
if (fallback_mono16) {
provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(decode_buffer.data()), cur_frame, fill_len, volume);
} else {
provider->GetAudioWithVolume(decode_buffer.data(), cur_frame, fill_len, volume);
}
}
if ((size_t)fill_len * bpf < decode_buffer.size())
// And zerofill the rest
memset(&decode_buffer[fill_len * bpf], 0, decode_buffer.size() - fill_len * bpf);
cur_frame += fill_len;
alBufferData(buffers[buf_first_free], AL_FORMAT_MONO16, &decode_buffer[0], decode_buffer.size(), samplerate);
alBufferData(buffers[buf_first_free], format, &decode_buffer[0], decode_buffer.size(), samplerate);
alSourceQueueBuffers(source, 1, &buffers[buf_first_free]); // FIXME: collect buffer handles and queue all at once instead of one at a time?
buf_first_free = (buf_first_free + 1) % num_buffers;
--buffers_free;

View File

@ -131,7 +131,7 @@ public:
while (!TestDestroy() && parent->cur_frame < parent->end_frame) {
int rsize = std::min(wsize, parent->end_frame - parent->cur_frame);
parent->provider->GetAudioWithVolume(buf, parent->cur_frame,
parent->provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(buf), parent->cur_frame,
rsize, parent->volume);
int written = ::write(parent->dspdev, buf, rsize * parent->bpf);
parent->cur_frame += written / parent->bpf;
@ -146,7 +146,7 @@ public:
void OSSPlayer::OpenStream()
{
bpf = provider->GetChannels() * provider->GetBytesPerSample();
bpf = /*provider->GetChannels() * provider->GetBytesPerSample()*/sizeof(int16_t);
// Open device
wxString device = to_wx(OPT_GET("Player/Audio/OSS/Device")->GetString());
@ -162,14 +162,14 @@ void OSSPlayer::OpenStream()
#endif
// Set number of channels
int channels = provider->GetChannels();
int channels = /*provider->GetChannels()*/1;
if (ioctl(dspdev, SNDCTL_DSP_CHANNELS, &channels) < 0) {
throw AudioPlayerOpenError("OSS player: setting channels failed");
}
// Set sample format
int sample_format;
switch (provider->GetBytesPerSample()) {
switch (/*provider->GetBytesPerSample()*/sizeof(int16_t)) {
case 1:
sample_format = AFMT_S8;
break;
@ -283,4 +283,4 @@ std::unique_ptr<AudioPlayer> CreateOSSPlayer(agi::AudioProvider *provider, wxWin
return agi::make_unique<OSSPlayer>(provider);
}
#endif // WITH_OSS
#endif // WITH_OSS

View File

@ -64,6 +64,32 @@ static const PaHostApiTypeId pa_host_api_priority[] = {
};
static const size_t pa_host_api_priority_count = sizeof(pa_host_api_priority) / sizeof(pa_host_api_priority[0]);
PaSampleFormat PortAudioPlayer::GetSampleFormat(agi::AudioProvider *provider) {
if (provider->AreSamplesFloat()) {
switch (provider->GetBytesPerSample()) {
case 4:
return paFloat32;
default:
fallback_mono16 = true;
return paInt16;
}
} else {
switch (provider->GetBytesPerSample()) {
case 1:
return paUInt8;
case 2:
return paInt16;
case 3:
return paInt24;
case 4:
return paInt32;
default:
fallback_mono16 = true;
return paInt16;
}
}
}
PortAudioPlayer::PortAudioPlayer(agi::AudioProvider *provider) : AudioPlayer(provider) {
PaError err = Pa_Initialize();
@ -140,8 +166,8 @@ void PortAudioPlayer::OpenStream() {
const PaDeviceInfo *device_info = Pa_GetDeviceInfo((*device_ids)[i]);
PaStreamParameters pa_output_p;
pa_output_p.device = (*device_ids)[i];
pa_output_p.channelCount = provider->GetChannels();
pa_output_p.sampleFormat = paInt16;
pa_output_p.sampleFormat = GetSampleFormat(provider);
pa_output_p.channelCount = fallback_mono16 ? 1 : provider->GetChannels();
pa_output_p.suggestedLatency = device_info->defaultLowOutputLatency;
pa_output_p.hostApiSpecificStreamInfo = nullptr;
@ -222,7 +248,11 @@ int PortAudioPlayer::paCallback(const void *inputBuffer, void *outputBuffer,
// Play something
if (lenAvailable > 0) {
player->provider->GetAudioWithVolume(outputBuffer, player->current, lenAvailable, player->GetVolume());
if (player->fallback_mono16) {
player->provider->GetInt16MonoAudioWithVolume(reinterpret_cast<int16_t*>(outputBuffer), player->current, lenAvailable, player->GetVolume());
} else {
player->provider->GetAudioWithVolume(outputBuffer, player->current, lenAvailable, player->GetVolume());
}
// Set play position
player->current += lenAvailable;

View File

@ -64,6 +64,7 @@ class PortAudioPlayer final : public AudioPlayer {
PaTime pa_start; ///< PortAudio internal start position
PaStream *stream = nullptr; ///< PortAudio stream
bool fallback_mono16 = false; ///< whether to fall back to 16 bit mono
/// @brief PortAudio callback, used to fill buffer for playback, and prime the playback buffer.
/// @param inputBuffer Input buffer.
@ -87,6 +88,8 @@ class PortAudioPlayer final : public AudioPlayer {
/// @param userData Local data to be handed to the callback.
static void paStreamFinishedCallback(void *userData);
PaSampleFormat GetSampleFormat(agi::AudioProvider *provider);
/// Gather the list of output devices supported by a host API
/// @param host_idx Host API ID
void GatherDevices(PaHostApiIndex host_idx);

View File

@ -48,7 +48,7 @@
namespace {
class PulseAudioPlayer final : public AudioPlayer {
float volume = 1.f;
pa_cvolume volume;
bool is_playing = false;
volatile unsigned long start_frame = 0;
@ -56,6 +56,7 @@ class PulseAudioPlayer final : public AudioPlayer {
volatile unsigned long end_frame = 0;
unsigned long bpf = 0; // bytes per frame
bool fallback_mono16 = false; // whether to convert to 16 bit mono. FIXME: more flexible conversion
wxSemaphore context_notify{0, 1};
wxSemaphore stream_notify{0, 1};
@ -73,6 +74,7 @@ class PulseAudioPlayer final : public AudioPlayer {
int paerror = 0;
static void pa_setvolume_success(pa_context *c, int success, PulseAudioPlayer *thread);
/// Called by PA to notify about other context-related stuff
static void pa_context_notify(pa_context *c, PulseAudioPlayer *thread);
/// Called by PA when a stream operation completes
@ -82,6 +84,8 @@ class PulseAudioPlayer final : public AudioPlayer {
/// Called by PA to notify about other stream-related stuff
static void pa_stream_notify(pa_stream *p, PulseAudioPlayer *thread);
/// Find the sample format and set fallback_mono16 if necessary
pa_sample_format_t GetSampleFormat(const agi::AudioProvider *provider);
public:
PulseAudioPlayer(agi::AudioProvider *provider);
~PulseAudioPlayer();
@ -94,9 +98,35 @@ public:
int64_t GetCurrentPosition();
void SetEndPosition(int64_t pos);
void SetVolume(double vol) { volume = vol; }
void SetVolume(double vol);
};
pa_sample_format_t PulseAudioPlayer::GetSampleFormat(const agi::AudioProvider *provider) {
if (provider->AreSamplesFloat()) {
switch (provider->GetBytesPerSample()) {
case 4:
return PA_SAMPLE_FLOAT32LE;
default:
fallback_mono16 = true;
return PA_SAMPLE_S16LE;
}
} else {
switch (provider->GetBytesPerSample()) {
case 1:
return PA_SAMPLE_U8;
case 2:
return PA_SAMPLE_S16LE;
case 3:
return PA_SAMPLE_S24LE;
case 4:
return PA_SAMPLE_S32LE;
default:
fallback_mono16 = true;
return PA_SAMPLE_S16LE;
}
}
}
PulseAudioPlayer::PulseAudioPlayer(agi::AudioProvider *provider) : AudioPlayer(provider) {
// Initialise a mainloop
mainloop = pa_threaded_mainloop_new();
@ -133,13 +163,14 @@ PulseAudioPlayer::PulseAudioPlayer(agi::AudioProvider *provider) : AudioPlayer(p
}
// Set up stream
bpf = provider->GetChannels() * provider->GetBytesPerSample();
pa_sample_spec ss;
ss.format = PA_SAMPLE_S16LE; // FIXME
ss.format = GetSampleFormat(provider);
bpf = fallback_mono16 ? sizeof(int16_t) : provider->GetChannels() * provider->GetBytesPerSample();
ss.rate = provider->GetSampleRate();
ss.channels = provider->GetChannels();
ss.channels = fallback_mono16 ? 1 : provider->GetChannels();
pa_channel_map map;
pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_DEFAULT);
pa_cvolume_init(&volume);
stream = pa_stream_new(context, "Sound", &ss, &map);
if (!stream) {
@ -269,6 +300,11 @@ int64_t PulseAudioPlayer::GetCurrentPosition()
return start_frame + playtime * provider->GetSampleRate() / (1000*1000);
}
void PulseAudioPlayer::SetVolume(double vol) {
pa_cvolume_set(&volume, fallback_mono16 ? 1 : provider->GetChannels(), pa_sw_volume_from_linear(vol));
pa_context_set_sink_input_volume(context, pa_stream_get_index(stream), &volume, nullptr, nullptr);
}
/// @brief Called by PA to notify about other context-related stuff
void PulseAudioPlayer::pa_context_notify(pa_context *c, PulseAudioPlayer *thread)
{
@ -308,7 +344,11 @@ void PulseAudioPlayer::pa_stream_write(pa_stream *p, size_t length, PulseAudioPl
unsigned long maxframes = thread->end_frame - thread->cur_frame;
if (frames > maxframes) frames = maxframes;
void *buf = malloc(frames * bpf);
thread->provider->GetAudioWithVolume(buf, thread->cur_frame, frames, thread->volume);
if (thread->fallback_mono16) {
thread->provider->GetInt16MonoAudio(reinterpret_cast<int16_t *>(buf), thread->cur_frame, frames);
} else {
thread->provider->GetAudio(buf, thread->cur_frame, frames);
}
::pa_stream_write(p, buf, frames*bpf, free, 0, PA_SEEK_RELATIVE);
thread->cur_frame += frames;
}

View File

@ -0,0 +1,694 @@
// Copyright (c) 2019, Qirui Wang
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the Aegisub Group nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Aegisub Project http://www.aegisub.org/
#ifdef WITH_XAUDIO2
#include "include/aegisub/audio_player.h"
#include "options.h"
#include <libaegisub/audio/provider.h>
#include <libaegisub/scoped_ptr.h>
#include <libaegisub/log.h>
#include <libaegisub/make_unique.h>
#ifndef XAUDIO2_REDIST
#include <xaudio2.h>
#else
#include <xaudio2redist.h>
#endif
namespace {
class XAudio2Thread;
/// @class XAudio2Player
/// @brief XAudio2-based audio player
///
/// The core design idea is to have a playback thread that performs all playback operations, and use the player object as a proxy to send commands to the playback thread.
class XAudio2Player final : public AudioPlayer {
/// The playback thread
std::unique_ptr<XAudio2Thread> thread;
/// Desired length in milliseconds to write ahead of the playback cursor
int WantedLatency;
/// Multiplier for WantedLatency to get total buffer length
int BufferLength;
/// @brief Tell whether playback thread is alive
/// @return True if there is a playback thread and it's ready
bool IsThreadAlive();
public:
/// @brief Constructor
XAudio2Player(agi::AudioProvider* provider);
/// @brief Destructor
~XAudio2Player() = default;
/// @brief Start playback
/// @param start First audio frame to play
/// @param count Number of audio frames to play
void Play(int64_t start, int64_t count);
/// @brief Stop audio playback
void Stop();
/// @brief Tell whether playback is active
/// @return True if audio is playing back
bool IsPlaying();
/// @brief Get playback end position
/// @return Audio frame index
///
/// Returns 0 if playback is stopped or there is no playback thread
int64_t GetEndPosition();
/// @brief Get approximate playback position
/// @return Index of audio frame user is currently hearing
///
/// Returns 0 if playback is stopped or there is no playback thread
int64_t GetCurrentPosition();
/// @brief Change playback end position
/// @param pos New end position
void SetEndPosition(int64_t pos);
/// @brief Change playback volume
/// @param vol Amplification factor
void SetVolume(double vol);
};
/// @brief RAII support class to init and de-init the COM library
struct COMInitialization {
/// Flag set if an inited COM library is managed
bool inited = false;
/// @brief Destructor, de-inits COM if it is inited
~COMInitialization() {
if (inited) CoUninitialize();
}
/// @brief Initialise the COM library as single-threaded apartment if it isn't already inited by us
bool Init() {
if (!inited && SUCCEEDED(CoInitialize(nullptr)))
inited = true;
return inited;
}
};
struct ReleaseCOMObject {
void operator()(IUnknown* obj) {
if (obj) obj->Release();
}
};
/// @brief RAII wrapper around Win32 HANDLE type
struct Win32KernelHandle final : public agi::scoped_holder<HANDLE, BOOL(__stdcall*)(HANDLE)> {
/// @brief Create with a managed handle
/// @param handle Win32 handle to manage
Win32KernelHandle(HANDLE handle = 0) :scoped_holder(handle, CloseHandle) {}
Win32KernelHandle& operator=(HANDLE new_handle) {
scoped_holder::operator=(new_handle);
return *this;
}
};
/// @class XAudio2Thread
/// @brief Playback thread class for XAudio2Player
///
/// Not based on wxThread, but uses Win32 threads directly
class XAudio2Thread :public IXAudio2VoiceCallback {
/// @brief Win32 thread entry point
/// @param parameter Pointer to our thread object
/// @return Thread return value, always 0 here
static unsigned int __stdcall ThreadProc(void* parameter);
/// @brief Thread entry point
void Run();
/// @brief Check for error state and throw exception if one occurred
void CheckError();
/// Win32 handle to the thread
Win32KernelHandle thread_handle;
/// Event object, world to thread, set to start playback
Win32KernelHandle event_start_playback;
/// Event object, world to thread, set to stop playback
Win32KernelHandle event_stop_playback;
/// Event object, world to thread, set if playback end time was updated
Win32KernelHandle event_update_end_time;
/// Event object, world to thread, set if the volume was changed
Win32KernelHandle event_set_volume;
/// Event object, set from the XAudio2 voice callback when a buffer finishes playing
Win32KernelHandle event_buffer_end;
/// Event object, world to thread, set if the thread should end as soon as possible
Win32KernelHandle event_kill_self;
/// Event object, thread to world, set when the thread has entered its main loop
Win32KernelHandle thread_running;
/// Event object, thread to world, set when playback is ongoing
Win32KernelHandle is_playing;
/// Event object, thread to world, set if an error state has occurred (implies thread is dying)
Win32KernelHandle error_happened;
/// Statically allocated error message text describing reason for error_happened being set
const char* error_message = nullptr;
/// Playback volume, 1.0 is "unchanged"
double volume = 1.0;
/// Audio frame to start playback at
int64_t start_frame = 0;
/// Audio frame to end playback at
int64_t end_frame = 0;
/// Desired length in milliseconds to write ahead of the playback cursor
int wanted_latency;
/// Multiplier for WantedLatency to get total buffer length
int buffer_length;
/// System millisecond timestamp of last playback start, used to calculate playback position
ULONGLONG last_playback_restart;
/// Audio provider to take sample data from
agi::AudioProvider* provider;
/// Buffer occupied indicator
std::vector<bool> buffer_occupied;
public:
/// @brief Constructor, creates and starts playback thread
/// @param provider Audio provider to take sample data from
/// @param WantedLatency Desired length in milliseconds to write ahead of the playback cursor
/// @param BufferLength Multiplier for WantedLatency to get total buffer length
XAudio2Thread(agi::AudioProvider* provider, int WantedLatency, int BufferLength);
/// @brief Destructor, waits for thread to have died
~XAudio2Thread();
// IXAudio2VoiceCallback
void STDMETHODCALLTYPE OnVoiceProcessingPassStart(UINT32 BytesRequired) override {}
void STDMETHODCALLTYPE OnVoiceProcessingPassEnd() override {}
void STDMETHODCALLTYPE OnStreamEnd() override {}
void STDMETHODCALLTYPE OnBufferStart(void* pBufferContext) override {}
void STDMETHODCALLTYPE OnBufferEnd(void* pBufferContext) override {
intptr_t i = reinterpret_cast<intptr_t>(pBufferContext);
buffer_occupied[i] = false;
SetEvent(event_buffer_end);
}
void STDMETHODCALLTYPE OnLoopEnd(void* pBufferContext) override {}
void STDMETHODCALLTYPE OnVoiceError(void* pBufferContext, HRESULT Error) override {}
/// @brief Start audio playback
/// @param start Audio frame to start playback at
/// @param count Number of audio frames to play
void Play(int64_t start, int64_t count);
/// @brief Stop audio playback
void Stop();
/// @brief Change audio playback end point
/// @param new_end_frame New last audio frame to play
///
/// Playback stops instantly if new_end_frame is before the current playback position
void SetEndFrame(int64_t new_end_frame);
/// @brief Change audio playback volume
/// @param new_volume New playback amplification factor, 1.0 is "unchanged"
void SetVolume(double new_volume);
/// @brief Tell whether audio playback is active
/// @return True if audio is being played back, false if it is not
bool IsPlaying();
/// @brief Get approximate current audio frame being heard by the user
/// @return Audio frame index
///
/// Returns 0 if not playing
int64_t GetCurrentFrame();
/// @brief Get audio playback end point
/// @return Audio frame index
int64_t GetEndFrame();
/// @brief Tell whether playback thread has died
/// @return True if thread is no longer running
bool IsDead();
};
unsigned int __stdcall XAudio2Thread::ThreadProc(void* parameter) {
static_cast<XAudio2Thread*>(parameter)->Run();
return 0;
}
/// Macro used to set error_message, error_happened and end the thread
#define REPORT_ERROR(msg) \
{ \
ResetEvent(is_playing); \
error_message = "XAudio2Thread: " msg; \
SetEvent(error_happened); \
return; \
}
void XAudio2Thread::Run() {
COMInitialization COM_library;
if (!COM_library.Init()) {
REPORT_ERROR("Could not initialise COM")
}
IXAudio2* pXAudio2;
IXAudio2SourceVoice* pSourceVoice;
HRESULT hr;
if (FAILED(hr = XAudio2Create(&pXAudio2, 0, XAUDIO2_DEFAULT_PROCESSOR))) {
REPORT_ERROR("Failed initializing XAudio2")
}
IXAudio2MasteringVoice* pMasterVoice = NULL;
if (FAILED(hr = pXAudio2->CreateMasteringVoice(&pMasterVoice))) {
REPORT_ERROR("Failed initializing XAudio2 MasteringVoice")
}
// Describe the wave format
WAVEFORMATEX wfx;
wfx.nSamplesPerSec = provider->GetSampleRate();
wfx.cbSize = 0;
bool original = true;
wfx.wFormatTag = provider->AreSamplesFloat() ? WAVE_FORMAT_IEEE_FLOAT : WAVE_FORMAT_PCM;
wfx.nChannels = provider->GetChannels();
wfx.wBitsPerSample = provider->GetBytesPerSample() * 8;
wfx.nBlockAlign = wfx.nChannels * wfx.wBitsPerSample / 8;
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
if (FAILED(hr = pXAudio2->CreateSourceVoice(&pSourceVoice, &wfx, 0, 2, this))) {
if (hr == XAUDIO2_E_INVALID_CALL) {
// Retry with 16bit mono
original = false;
wfx.wFormatTag = WAVE_FORMAT_PCM;
wfx.nChannels = 1;
wfx.wBitsPerSample = sizeof(int16_t) * 8;
wfx.nBlockAlign = wfx.nChannels * wfx.wBitsPerSample / 8;
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
if (FAILED(hr = pXAudio2->CreateSourceVoice(&pSourceVoice, &wfx, 0, 2, this))) {
REPORT_ERROR("Failed initializing XAudio2 SourceVoice")
}
}
else {
REPORT_ERROR("Failed initializing XAudio2 SourceVoice")
}
}
// Now we're ready to roll!
SetEvent(thread_running);
bool running = true;
HANDLE events_to_wait[] = {
event_start_playback,
event_stop_playback,
event_update_end_time,
event_set_volume,
event_buffer_end,
event_kill_self
};
int64_t next_input_frame = 0;
DWORD buffer_offset = 0;
bool playback_should_be_running = false;
int current_latency = wanted_latency;
const int wanted_frames = wanted_latency * wfx.nSamplesPerSec / 1000;
const DWORD wanted_latency_bytes = wanted_frames * wfx.nBlockAlign;
std::vector<std::vector<BYTE> > buff(buffer_length);
for (auto& i : buff)
i.resize(wanted_latency_bytes);
while (running) {
DWORD wait_result = WaitForMultipleObjects(sizeof(events_to_wait) / sizeof(HANDLE), events_to_wait, FALSE, INFINITE);
switch (wait_result) {
case WAIT_OBJECT_0 + 0:
// Start or restart playback
pSourceVoice->Stop();
pSourceVoice->FlushSourceBuffers();
next_input_frame = start_frame;
playback_should_be_running = true;
pSourceVoice->Start();
SetEvent(is_playing);
goto do_fill_buffer;
case WAIT_OBJECT_0 + 1:
stop_playback:
// Stop playing
ResetEvent(is_playing);
pSourceVoice->Stop();
pSourceVoice->FlushSourceBuffers();
playback_should_be_running = false;
break;
case WAIT_OBJECT_0 + 2:
// Set end frame
if (end_frame <= next_input_frame)
goto stop_playback;
goto do_fill_buffer;
case WAIT_OBJECT_0 + 3:
// Change volume
pSourceVoice->SetVolume(volume);
break;
case WAIT_OBJECT_0 + 4:
// Buffer end
do_fill_buffer:
// Time to fill more into buffer
if (!playback_should_be_running)
break;
for (int i = 0; i < buffer_length; ++i) {
if (!buffer_occupied[i]) {
int fill_len = std::min<int>(end_frame - next_input_frame, wanted_frames);
if (fill_len <= 0)
break;
buffer_occupied[i] = true;
if (original)
provider->GetAudio(buff[i].data(), next_input_frame, fill_len);
else
provider->GetInt16MonoAudio(reinterpret_cast<int16_t*>(buff[i].data()), next_input_frame, fill_len);
next_input_frame += fill_len;
XAUDIO2_BUFFER xbf;
xbf.Flags = next_input_frame == end_frame ? XAUDIO2_END_OF_STREAM : 0; // next_input_frame already includes fill_len at this point
xbf.AudioBytes = fill_len * wfx.nBlockAlign;
xbf.pAudioData = buff[i].data();
xbf.PlayBegin = 0;
xbf.PlayLength = 0;
xbf.LoopBegin = 0;
xbf.LoopLength = 0;
xbf.LoopCount = 0;
xbf.pContext = reinterpret_cast<void*>(static_cast<intptr_t>(i));
if (FAILED(hr = pSourceVoice->SubmitSourceBuffer(&xbf))) {
REPORT_ERROR("Failed initializing Submit Buffer")
}
}
}
break;
case WAIT_OBJECT_0 + 5:
// Perform suicide
running = false;
pXAudio2->Release();
ResetEvent(is_playing);
playback_should_be_running = false;
break;
default:
REPORT_ERROR("Something bad happened while waiting on events in playback loop, either the wait failed or an event object was abandoned.")
break;
}
}
}
#undef REPORT_ERROR
void XAudio2Thread::CheckError()
{
try {
switch (WaitForSingleObject(error_happened, 0))
{
case WAIT_OBJECT_0:
throw error_message;
case WAIT_ABANDONED:
throw "The XAudio2Thread error signal event was abandoned, somehow. This should not happen.";
case WAIT_FAILED:
throw "Failed checking state of XAudio2Thread error signal event.";
case WAIT_TIMEOUT:
default:
return;
}
}
catch (...) {
ResetEvent(is_playing);
ResetEvent(thread_running);
throw;
}
}
XAudio2Thread::XAudio2Thread(agi::AudioProvider* provider, int WantedLatency, int BufferLength)
: event_start_playback(CreateEvent(0, FALSE, FALSE, 0))
, event_stop_playback(CreateEvent(0, FALSE, FALSE, 0))
, event_update_end_time(CreateEvent(0, FALSE, FALSE, 0))
, event_set_volume(CreateEvent(0, FALSE, FALSE, 0))
, event_buffer_end(CreateEvent(0, FALSE, FALSE, 0))
, event_kill_self(CreateEvent(0, FALSE, FALSE, 0))
, thread_running(CreateEvent(0, TRUE, FALSE, 0))
, is_playing(CreateEvent(0, TRUE, FALSE, 0))
, error_happened(CreateEvent(0, FALSE, FALSE, 0))
, wanted_latency(WantedLatency)
, buffer_length(BufferLength < XAUDIO2_MAX_QUEUED_BUFFERS ? BufferLength : XAUDIO2_MAX_QUEUED_BUFFERS)
, provider(provider)
, buffer_occupied(BufferLength)
{
if (!(thread_handle = (HANDLE)_beginthreadex(0, 0, ThreadProc, this, 0, 0))) {
throw AudioPlayerOpenError("Failed creating playback thread in XAudio2Player. This is bad.");
}
HANDLE running_or_error[] = { thread_running, error_happened };
switch (WaitForMultipleObjects(2, running_or_error, FALSE, INFINITE)) {
case WAIT_OBJECT_0:
// running, all good
return;
case WAIT_OBJECT_0 + 1:
// error happened, we fail
throw AudioPlayerOpenError(error_message ? error_message : "Failed wait for thread start or thread error in XAudio2Player. This is bad.");
default:
throw AudioPlayerOpenError("Failed wait for thread start or thread error in XAudio2Player. This is bad.");
}
}
XAudio2Thread::~XAudio2Thread() {
SetEvent(event_kill_self);
WaitForSingleObject(thread_handle, INFINITE);
}
void XAudio2Thread::Play(int64_t start, int64_t count)
{
CheckError();
start_frame = start;
end_frame = start + count;
SetEvent(event_start_playback);
last_playback_restart = GetTickCount64();
// Block until playback actually begins to avoid race conditions with
// checking if playback is in progress
HANDLE events_to_wait[] = { is_playing, error_happened };
switch (WaitForMultipleObjects(2, events_to_wait, FALSE, INFINITE)) {
case WAIT_OBJECT_0 + 0: // Playing
LOG_D("audio/player/xaudio2") << "Playback begun";
break;
case WAIT_OBJECT_0 + 1: // Error
throw error_message;
default:
throw agi::InternalError("Unexpected result from WaitForMultipleObjects in XAudio2Thread::Play");
}
}
void XAudio2Thread::Stop() {
CheckError();
SetEvent(event_stop_playback);
}
void XAudio2Thread::SetEndFrame(int64_t new_end_frame) {
CheckError();
end_frame = new_end_frame;
SetEvent(event_update_end_time);
}
void XAudio2Thread::SetVolume(double new_volume) {
CheckError();
volume = new_volume;
SetEvent(event_set_volume);
}
bool XAudio2Thread::IsPlaying() {
CheckError();
switch (WaitForSingleObject(is_playing, 0))
{
case WAIT_ABANDONED:
throw "The XAudio2Thread playback state event was abandoned, somehow. This should not happen.";
case WAIT_FAILED:
throw "Failed checking state of XAudio2Thread playback state event.";
case WAIT_OBJECT_0:
return true;
case WAIT_TIMEOUT:
default:
return false;
}
}
int64_t XAudio2Thread::GetCurrentFrame() {
CheckError();
if (!IsPlaying()) return 0;
ULONGLONG milliseconds_elapsed = GetTickCount64() - last_playback_restart;
return start_frame + milliseconds_elapsed * provider->GetSampleRate() / 1000;
}
int64_t XAudio2Thread::GetEndFrame() {
CheckError();
return end_frame;
}
bool XAudio2Thread::IsDead() {
switch (WaitForSingleObject(thread_running, 0))
{
case WAIT_OBJECT_0:
return false;
default:
return true;
}
}
XAudio2Player::XAudio2Player(agi::AudioProvider* provider) :AudioPlayer(provider) {
// The buffer will hold BufferLength times WantedLatency milliseconds of audio
WantedLatency = OPT_GET("Player/Audio/DirectSound/Buffer Latency")->GetInt();
BufferLength = OPT_GET("Player/Audio/DirectSound/Buffer Length")->GetInt();
// sanity checking
if (WantedLatency <= 0)
WantedLatency = 100;
if (BufferLength <= 0)
BufferLength = 5;
try {
thread = agi::make_unique<XAudio2Thread>(provider, WantedLatency, BufferLength);
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
throw AudioPlayerOpenError(msg);
}
}
bool XAudio2Player::IsThreadAlive() {
if (thread && thread->IsDead())
thread.reset();
return static_cast<bool>(thread);
}
void XAudio2Player::Play(int64_t start, int64_t count) {
try {
thread->Play(start, count);
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
}
}
void XAudio2Player::Stop() {
try {
if (IsThreadAlive()) thread->Stop();
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
}
}
bool XAudio2Player::IsPlaying() {
try {
if (!IsThreadAlive()) return false;
return thread->IsPlaying();
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
return false;
}
}
int64_t XAudio2Player::GetEndPosition() {
try {
if (!IsThreadAlive()) return 0;
return thread->GetEndFrame();
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
return 0;
}
}
int64_t XAudio2Player::GetCurrentPosition() {
try {
if (!IsThreadAlive()) return 0;
return thread->GetCurrentFrame();
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
return 0;
}
}
void XAudio2Player::SetEndPosition(int64_t pos) {
try {
if (IsThreadAlive()) thread->SetEndFrame(pos);
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
}
}
void XAudio2Player::SetVolume(double vol) {
try {
if (IsThreadAlive()) thread->SetVolume(vol);
}
catch (const char* msg) {
LOG_E("audio/player/xaudio2") << msg;
}
}
}
std::unique_ptr<AudioPlayer> CreateXAudio2Player(agi::AudioProvider* provider, wxWindow*) {
return agi::make_unique<XAudio2Player>(provider);
}
#endif // WITH_XAUDIO2

View File

@ -63,9 +63,7 @@ public:
bool NeedsCache() const override { return true; }
};
AvisynthAudioProvider::AvisynthAudioProvider(agi::fs::path const& filename) {
agi::acs::CheckFileRead(filename);
AvisynthAudioProvider::AvisynthAudioProvider(agi::fs::path const& filename) try {
std::lock_guard<std::mutex> lock(avs_wrapper.GetMutex());
try {
@ -100,6 +98,9 @@ AvisynthAudioProvider::AvisynthAudioProvider(agi::fs::path const& filename) {
throw agi::AudioProviderError("Avisynth error: " + errmsg);
}
}
catch (AvisynthError& err) {
throw agi::AudioProviderError("Avisynth error: " + std::string(err.msg));
}
void AvisynthAudioProvider::LoadFromClip(AVSValue clip) {
// Check if it has audio
@ -107,12 +108,15 @@ void AvisynthAudioProvider::LoadFromClip(AVSValue clip) {
if (!vi.HasAudio()) throw agi::AudioDataNotFound("No audio found.");
IScriptEnvironment *env = avs_wrapper.GetEnv();
AVSValue script;
// Convert to one channel
AVSValue script = env->Invoke(OPT_GET("Audio/Downmixer")->GetString().c_str(), clip);
std::string downmixtype = OPT_GET("Audio/Downmixer")->GetString();
if (downmixtype == "ConvertToMono" || downmixtype == "GetLeftChannel" || downmixtype == "GetRightChannel")
script = env->Invoke(downmixtype.c_str(), clip);
else
script = clip;
// Convert to 16 bits per sample
script = env->Invoke("ConvertAudioTo16bit", script);
vi = script.AsClip()->GetVideoInfo();
// Convert sample rate
@ -132,8 +136,8 @@ void AvisynthAudioProvider::LoadFromClip(AVSValue clip) {
channels = vi.AudioChannels();
decoded_samples = num_samples = vi.num_audio_samples;
sample_rate = vi.SamplesPerSecond();
bytes_per_sample = vi.BytesPerAudioSample();
float_samples = false;
bytes_per_sample = vi.BytesPerChannelSample();
float_samples = vi.IsSampleType(SAMPLE_FLOAT);
this->clip = tempclip;
}

View File

@ -0,0 +1,90 @@
// Copyright (c) 2022, arch1t3cht <arch1t3cht@gmail.com>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
// Aegisub Project http://www.aegisub.org/
/// @file audio_provider_bestsource.cpp
/// @brief BS-based audio provider
/// @ingroup audio_input bestsource
///
#ifdef WITH_BESTSOURCE
#include <libaegisub/audio/provider.h>
#include "audiosource.h"
#include "bestsource_common.h"
#include "compat.h"
#include "options.h"
#include <libaegisub/fs.h>
#include <libaegisub/make_unique.h>
#include <libaegisub/background_runner.h>
#include <libaegisub/log.h>
#include <map>
namespace {
class BSAudioProvider final : public agi::AudioProvider {
std::map<std::string, std::string> bsopts;
BestAudioSource bs;
AudioProperties properties;
void FillBuffer(void *Buf, int64_t Start, int64_t Count) const override;
public:
BSAudioProvider(agi::fs::path const& filename, agi::BackgroundRunner *br);
bool NeedsCache() const override { return OPT_GET("Provider/Audio/BestSource/Aegisub Cache")->GetBool(); }
};
/// @brief Constructor
/// @param filename The filename to open
BSAudioProvider::BSAudioProvider(agi::fs::path const& filename, agi::BackgroundRunner *br) try
: bsopts()
, bs(filename.string(), -1, -1, GetBSCacheFile(filename), &bsopts)
{
bs.SetMaxCacheSize(OPT_GET("Provider/Audio/BestSource/Max Cache Size")->GetInt() << 20);
br->Run([&](agi::ProgressSink *ps) {
ps->SetTitle(from_wx(_("Indexing")));
ps->SetMessage(from_wx(_("Creating cache... This can take a while!")));
ps->SetIndeterminate();
if (bs.GetExactDuration()) {
LOG_D("bs") << "File cached and has exact samples.";
}
});
BSCleanCache();
properties = bs.GetAudioProperties();
float_samples = properties.IsFloat;
bytes_per_sample = properties.BytesPerSample;
sample_rate = properties.SampleRate;
channels = properties.Channels;
num_samples = properties.NumSamples;
decoded_samples = OPT_GET("Provider/Audio/BestSource/Aegisub Cache")->GetBool() ? 0 : num_samples;
}
catch (AudioException const& err) {
throw agi::AudioProviderError("Failed to create BestAudioSource");
}
void BSAudioProvider::FillBuffer(void *Buf, int64_t Start, int64_t Count) const {
const_cast<BestAudioSource &>(bs).GetPackedAudio(reinterpret_cast<uint8_t *>(Buf), Start, Count);
}
}
std::unique_ptr<agi::AudioProvider> CreateBSAudioProvider(agi::fs::path const& file, agi::BackgroundRunner *br) {
return agi::make_unique<BSAudioProvider>(file, br);
}
#endif /* WITH_BESTSOURCE */

View File

@ -16,11 +16,13 @@
#include "audio_provider_factory.h"
#include "compat.h"
#include "factory_manager.h"
#include "options.h"
#include "utils.h"
#include <libaegisub/audio/provider.h>
#include <libaegisub/format.h>
#include <libaegisub/fs.h>
#include <libaegisub/log.h>
#include <libaegisub/path.h>
@ -31,12 +33,15 @@ using namespace agi;
std::unique_ptr<AudioProvider> CreateAvisynthAudioProvider(fs::path const& filename, BackgroundRunner *);
std::unique_ptr<AudioProvider> CreateFFmpegSourceAudioProvider(fs::path const& filename, BackgroundRunner *);
std::unique_ptr<AudioProvider> CreateBSAudioProvider(fs::path const& filename, BackgroundRunner *);
std::unique_ptr<AudioProvider> CreateVapourSynthAudioProvider(fs::path const& filename, BackgroundRunner *);
namespace {
struct factory {
const char *name;
std::unique_ptr<AudioProvider> (*create)(fs::path const&, BackgroundRunner *);
bool hidden;
std::function<bool(agi::fs::path const&)> wants_to_open = [](auto p) { return false; };
};
const factory providers[] = {
@ -46,7 +51,13 @@ const factory providers[] = {
{"FFmpegSource", CreateFFmpegSourceAudioProvider, false},
#endif
#ifdef WITH_AVISYNTH
{"Avisynth", CreateAvisynthAudioProvider, false},
{"Avisynth", CreateAvisynthAudioProvider, false, [](auto p) { return agi::fs::HasExtension(p, "avs"); }},
#endif
#ifdef WITH_BESTSOURCE
{"BestSource", CreateBSAudioProvider, false},
#endif
#ifdef WITH_VAPOURSYNTH
{"VapourSynth", CreateVapourSynthAudioProvider, false, [](auto p) { return agi::fs::HasExtension(p, "py") || agi::fs::HasExtension(p, "vpy"); }},
#endif
};
}
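// The providers[] table above now carries a wants_to_open() predicate per entry.
// Below is a minimal sketch (illustration only; needs <algorithm>) of what the
// RearrangeWithPriority() call used further down is assumed to do: stably move
// providers that claim the file to the front of the sorted list.
template<typename Range>
static void RearrangeWithPriority_sketch(Range &sorted, agi::fs::path const& filename) {
	std::stable_partition(std::begin(sorted), std::end(sorted),
	                      [&](const factory *f) { return f->wants_to_open(filename); });
}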
@ -55,52 +66,76 @@ std::vector<std::string> GetAudioProviderNames() {
return ::GetClasses(boost::make_iterator_range(std::begin(providers), std::end(providers)));
}
std::unique_ptr<agi::AudioProvider> GetAudioProvider(fs::path const& filename,
Path const& path_helper,
BackgroundRunner *br) {
std::unique_ptr<agi::AudioProvider> SelectAudioProvider(fs::path const& filename,
Path const& path_helper,
BackgroundRunner *br) {
auto preferred = OPT_GET("Audio/Provider")->GetString();
auto sorted = GetSorted(boost::make_iterator_range(std::begin(providers), std::end(providers)), preferred);
std::unique_ptr<AudioProvider> provider;
bool found_file = false;
bool found_audio = false;
std::string msg_all; // error messages from all attempted providers
std::string msg_partial; // error messages from providers that could partially load the file (knows container, missing codec)
RearrangeWithPriority(sorted, filename);
for (auto const& factory : sorted) {
bool found_file = false;
std::string errors;
auto tried_providers = sorted.begin();
for (; tried_providers < sorted.end(); tried_providers++) {
auto factory = *tried_providers;
std::string err;
try {
provider = factory->create(filename, br);
auto provider = factory->create(filename, br);
if (!provider) continue;
LOG_I("audio_provider") << "Using audio provider: " << factory->name;
return provider;
}
catch (AudioDataNotFound const& ex) {
found_file = true;
err = ex.GetMessage();
}
catch (AudioProviderError const& ex) {
found_file = true;
err = ex.GetMessage();
}
errors += std::string(factory->name) + ": " + err + "\n";
LOG_D("audio_provider") << factory->name << ": " << err;
if (factory->name == preferred)
break;
}
catch (fs::FileNotFound const& err) {
LOG_D("audio_provider") << err.GetMessage();
msg_all += std::string(factory->name) + ": " + err.GetMessage() + " not found.\n";
}
catch (AudioDataNotFound const& err) {
LOG_D("audio_provider") << err.GetMessage();
found_file = true;
msg_all += std::string(factory->name) + ": " + err.GetMessage() + "\n";
}
catch (AudioProviderError const& err) {
LOG_D("audio_provider") << err.GetMessage();
found_audio = true;
found_file = true;
std::string thismsg = std::string(factory->name) + ": " + err.GetMessage() + "\n";
msg_all += thismsg;
msg_partial += thismsg;
}
}
if (!provider) {
if (found_audio)
throw AudioProviderError(msg_partial);
if (found_file)
throw AudioDataNotFound(msg_all);
throw fs::FileNotFound(filename);
std::vector<const factory *> remaining_providers(tried_providers + 1, sorted.end());
if (!remaining_providers.size()) {
// No provider could open the file
LOG_E("audio_provider") << "Could not open " << filename;
std::string msg = "Could not open " + filename.string() + ":\n" + errors;
if (!found_file) throw AudioDataNotFound(filename.string());
throw AudioProviderError(msg);
}
std::vector<std::string> names;
for (auto const& f : remaining_providers)
names.push_back(f->name);
int choice = wxGetSingleChoiceIndex(agi::format("Could not open %s with the preferred provider:\n\n%s\nPlease choose a different audio provider to try:", filename.string(), errors), _("Error loading audio"), to_wx(names));
if (choice == -1) {
throw agi::UserCancelException("audio loading cancelled by user");
}
auto factory = remaining_providers[choice];
auto provider = factory->create(filename, br);
if (!provider)
throw AudioProviderError("Audio provider returned null pointer");
LOG_I("audio_provider") << factory->name << ": opened " << filename;
return provider;
}
std::unique_ptr<agi::AudioProvider> GetAudioProvider(fs::path const& filename,
Path const& path_helper,
BackgroundRunner *br) {
std::unique_ptr<agi::AudioProvider> provider = SelectAudioProvider(filename, path_helper, br);
bool needs_cache = provider->NeedsCache();
// Give it a converter if needed

View File

@ -165,17 +165,20 @@ void FFmpegSourceAudioProvider::LoadAudio(agi::fs::path const& filename) {
throw agi::AudioProviderError("unknown or unsupported sample format");
}
if (channels > 1 || bytes_per_sample != 2) {
std::unique_ptr<FFMS_ResampleOptions, decltype(&FFMS_DestroyResampleOptions)>
opt(FFMS_CreateResampleOptions(AudioSource), FFMS_DestroyResampleOptions);
opt->ChannelLayout = FFMS_CH_FRONT_CENTER;
opt->SampleFormat = FFMS_FMT_S16;
if (OPT_GET("Provider/Audio/FFmpegSource/Downmix")->GetBool()) {
if (channels > 2 || bytes_per_sample != 2 || float_samples) {
std::unique_ptr<FFMS_ResampleOptions, decltype(&FFMS_DestroyResampleOptions)>
opt(FFMS_CreateResampleOptions(AudioSource), FFMS_DestroyResampleOptions);
if (channels > 2)
opt->ChannelLayout = FFMS_CH_FRONT_LEFT | FFMS_CH_FRONT_RIGHT;
opt->SampleFormat = FFMS_FMT_S16;
// Might fail if FFMS2 wasn't built with libavresample
if (!FFMS_SetOutputFormatA(AudioSource, opt.get(), nullptr)) {
channels = 1;
bytes_per_sample = 2;
float_samples = false;
// Might fail if FFMS2 wasn't built with libavresample
if (!FFMS_SetOutputFormatA(AudioSource, opt.get(), nullptr)) {
channels = channels > 2 ? 2 : channels;
bytes_per_sample = 2;
float_samples = false;
}
}
}
}

src/audio_provider_vs.cpp (new file, 168 lines)
View File

@ -0,0 +1,168 @@
// Copyright (c) 2022, arch1t3cht <arch1t3cht@gmail.com>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
// Aegisub Project http://www.aegisub.org/
/// @file audio_provider_vs.cpp
/// @brief VapourSynth-based audio provider
/// @ingroup audio_input
///
#ifdef WITH_VAPOURSYNTH
#include <libaegisub/audio/provider.h>
#include "audio_controller.h"
#include "options.h"
#include "utils.h"
#include <libaegisub/access.h>
#include <libaegisub/format.h>
#include <libaegisub/path.h>
#include <libaegisub/make_unique.h>
#include <mutex>
#include "vapoursynth_wrap.h"
#include "vapoursynth_common.h"
#include "VSScript4.h"
namespace {
class VapourSynthAudioProvider final : public agi::AudioProvider {
VapourSynthWrapper vs;
VSScript *script = nullptr;
VSNode *node = nullptr;
const VSAudioInfo *vi = nullptr;
void FillBufferWithFrame(void *buf, int frame, int64_t start, int64_t count) const;
void FillBuffer(void *buf, int64_t start, int64_t count) const override;
public:
VapourSynthAudioProvider(agi::fs::path const& filename);
~VapourSynthAudioProvider();
bool NeedsCache() const override { return true; }
};
VapourSynthAudioProvider::VapourSynthAudioProvider(agi::fs::path const& filename) try {
std::lock_guard<std::mutex> lock(vs.GetMutex());
VSCleanCache();
script = vs.GetScriptAPI()->createScript(nullptr);
if (script == nullptr) {
throw VapourSynthError("Error creating script API");
}
vs.GetScriptAPI()->evalSetWorkingDir(script, 1);
if (OpenScriptOrVideo(vs.GetAPI(), vs.GetScriptAPI(), script, filename, OPT_GET("Provider/Audio/VapourSynth/Default Script")->GetString())) {
std::string msg = agi::format("Error executing VapourSynth script: %s", vs.GetScriptAPI()->getError(script));
vs.GetScriptAPI()->freeScript(script);
throw VapourSynthError(msg);
}
node = vs.GetScriptAPI()->getOutputNode(script, 0);
if (node == nullptr) {
vs.GetScriptAPI()->freeScript(script);
throw VapourSynthError("No output node set");
}
if (vs.GetAPI()->getNodeType(node) != mtAudio) {
vs.GetAPI()->freeNode(node);
vs.GetScriptAPI()->freeScript(script);
throw VapourSynthError("Output node isn't an audio node");
}
vi = vs.GetAPI()->getAudioInfo(node);
float_samples = vi->format.sampleType == stFloat;
bytes_per_sample = vi->format.bytesPerSample;
sample_rate = vi->sampleRate;
channels = vi->format.numChannels;
num_samples = vi->numSamples;
}
catch (VapourSynthError const& err) {
throw agi::AudioProviderError(err.GetMessage());
}
template<typename T>
static void PackChannels(const uint8_t **Src, void *Dst, size_t Length, size_t Channels) {
T *D = reinterpret_cast<T *>(Dst);
for (size_t c = 0; c < Channels; c++) {
const T *S = reinterpret_cast<const T *>(Src[c]);
for (size_t i = 0; i < Length; i++) {
D[Channels * i + c] = S[i];
}
}
}
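// Worked example of PackChannels (planar to interleaved): with Channels = 2 and
// Length = 3, Src[0] = {L0, L1, L2} and Src[1] = {R0, R1, R2} end up in Dst as
// {L0, R0, L1, R1, L2, R2}, i.e. D[Channels * i + c] is sample i of channel c.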
void VapourSynthAudioProvider::FillBufferWithFrame(void *buf, int n, int64_t start, int64_t count) const {
char errorMsg[1024];
const VSFrame *frame = vs.GetAPI()->getFrame(n, node, errorMsg, sizeof(errorMsg));
if (frame == nullptr) {
throw VapourSynthError(agi::format("Error getting frame: %s", errorMsg));
}
if (vs.GetAPI()->getFrameLength(frame) < count) {
vs.GetAPI()->freeFrame(frame);
throw VapourSynthError("Audio frame too short");
}
if (vs.GetAPI()->getAudioFrameFormat(frame)->numChannels != channels || vs.GetAPI()->getAudioFrameFormat(frame)->bytesPerSample != bytes_per_sample) {
vs.GetAPI()->freeFrame(frame);
throw VapourSynthError("Audio format is not constant");
}
std::vector<const uint8_t *> planes(channels);
for (int c = 0; c < channels; c++) {
planes[c] = vs.GetAPI()->getReadPtr(frame, c) + bytes_per_sample * start;
if (planes[c] == nullptr) {
vs.GetAPI()->freeFrame(frame);
throw VapourSynthError("Failed to read audio channel");
}
}
if (bytes_per_sample == 1)
PackChannels<uint8_t>(planes.data(), buf, count, channels);
else if (bytes_per_sample == 2)
PackChannels<uint16_t>(planes.data(), buf, count, channels);
else if (bytes_per_sample == 4)
PackChannels<uint32_t>(planes.data(), buf, count, channels);
else if (bytes_per_sample == 8)
PackChannels<uint64_t>(planes.data(), buf, count, channels);
vs.GetAPI()->freeFrame(frame);
}
void VapourSynthAudioProvider::FillBuffer(void *buf, int64_t start, int64_t count) const {
int end = start + count; // exclusive
int startframe = start / VS_AUDIO_FRAME_SAMPLES;
int endframe = (end - 1) / VS_AUDIO_FRAME_SAMPLES;
int offset = start - (VS_AUDIO_FRAME_SAMPLES * startframe);
for (int frame = startframe; frame <= endframe; frame++) {
int framestart = frame * VS_AUDIO_FRAME_SAMPLES;
int frameend = (frame + 1) * VS_AUDIO_FRAME_SAMPLES;
int fstart = framestart < start ? start - framestart : 0;
int fcount = VS_AUDIO_FRAME_SAMPLES - fstart - (frameend > end ? frameend - end : 0);
int bufstart = frame == startframe ? 0 : (frame - startframe) * VS_AUDIO_FRAME_SAMPLES - offset;
FillBufferWithFrame(reinterpret_cast<uint8_t *>(buf) + channels * bytes_per_sample * bufstart, frame, fstart, fcount);
}
}
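// Worked example of the frame arithmetic above, using an illustrative frame size
// of 1000 samples in place of the real VS_AUDIO_FRAME_SAMPLES: for start = 2500
// and count = 1200 we get end = 3700, startframe = 2, endframe = 3, offset = 500.
// Frame 2 contributes its samples [500, 1000) to buf[0, 500), and frame 3
// contributes its samples [0, 700) to buf[500, 1200), i.e. 1200 samples in total.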
VapourSynthAudioProvider::~VapourSynthAudioProvider() {
if (node != nullptr) {
vs.GetAPI()->freeNode(node);
}
if (script != nullptr) {
vs.GetScriptAPI()->freeScript(script);
}
}
}
std::unique_ptr<agi::AudioProvider> CreateVapourSynthAudioProvider(agi::fs::path const& file, agi::BackgroundRunner *) {
return agi::make_unique<VapourSynthAudioProvider>(file);
}
#endif

View File

@ -100,6 +100,8 @@ AudioSpectrumRenderer::~AudioSpectrumRenderer()
void AudioSpectrumRenderer::RecreateCache()
{
update_derivation_values ();
#ifdef WITH_FFTW3
if (dft_plan)
{
@ -143,20 +145,29 @@ void AudioSpectrumRenderer::OnSetProvider()
void AudioSpectrumRenderer::SetResolution(size_t _derivation_size, size_t _derivation_dist)
{
if (derivation_dist != _derivation_dist)
if (derivation_dist_user != _derivation_dist)
{
derivation_dist = _derivation_dist;
if (cache)
cache->Age(0);
derivation_dist_user = _derivation_dist;
update_derivation_values ();
AgeCache (0);
}
if (derivation_size != _derivation_size)
if (derivation_size_user != _derivation_size)
{
derivation_size = _derivation_size;
derivation_size_user = _derivation_size;
RecreateCache();
}
}
void AudioSpectrumRenderer::set_reference_frequency_position (float pos_fref_)
{
assert (pos_fref_ > 0.f);
assert (pos_fref_ < 1.f);
pos_fref = pos_fref_;
}
template<class T>
void AudioSpectrumRenderer::ConvertToFloat(size_t count, T *dest) {
for (size_t si = 0; si < count; ++si)
@ -165,20 +176,53 @@ void AudioSpectrumRenderer::ConvertToFloat(size_t count, T *dest) {
}
}
void AudioSpectrumRenderer::update_derivation_values ()
{
// Below this sampling rate (Hz), the derivation values are identical to
// the user-provided ones. Otherwise, they are scaled according to the
// ratio between the sampling rates.
// The threshold is set at 50 kHz so with standard rates like 48 kHz,
// the values are kept identical, and scaled with higher standard rates
// like 88.2 or 96 kHz.
constexpr float sample_rate_ref = 50000.f;
derivation_dist = derivation_dist_user;
derivation_size = derivation_size_user;
if (provider != nullptr)
{
const int sample_rate = provider->GetSampleRate ();
float mult = float (sample_rate) / sample_rate_ref;
while (mult > 1)
{
++ derivation_dist;
++ derivation_size;
mult *= 0.5f;
}
}
}
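// Worked example of the scaling above: at 48 kHz, mult = 48000 / 50000 = 0.96, so
// the user-provided values are used unchanged; at 96 kHz, mult = 1.92, so
// derivation_size and derivation_dist each grow by one (the FFT window doubles);
// at 192 kHz they grow by two.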
void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
{
assert(cache);
assert(block);
int64_t first_sample = (((int64_t)block_index) << derivation_dist) - ((int64_t)1 << derivation_size);
provider->GetAudio(&audio_scratch[0], first_sample, 2 << derivation_size);
provider->GetInt16MonoAudio(audio_scratch.data(), first_sample, 2 << derivation_size);
// Because the FFTs used here are unnormalized DFTs, we have to compensate
// the possible length difference between derivation_size used in the
// calculations and its user-provided counterpart. Thus, the display is
// kept independent of the sampling rate.
const float scale_fix =
1.f / sqrtf (float (1 << (derivation_size - derivation_size_user)));
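// For example, a single doubling (derivation_size == derivation_size_user + 1)
// gives scale_fix = 1 / sqrt(2), compensating the length difference described above.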
#ifdef WITH_FFTW3
ConvertToFloat(2 << derivation_size, dft_input);
fftw_execute(dft_plan);
double scale_factor = 9 / sqrt(2 << (derivation_size + 1));
double scale_factor = scale_fix * 9 / sqrt(2 << (derivation_size + 1));
fftw_complex *o = dft_output;
for (size_t si = (size_t)1<<derivation_size; si > 0; --si)
@ -196,7 +240,7 @@ void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
FFT fft;
fft.Transform(2<<derivation_size, fft_input, fft_real, fft_imag);
float scale_factor = 9 / sqrt(2 * (float)(2<<derivation_size));
float scale_factor = scale_fix * 9 / sqrt(2 * (float)(2<<derivation_size));
for (size_t si = 1<<derivation_size; si > 0; --si)
{
@ -211,6 +255,10 @@ void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle style)
{
// Misc. utility functions
auto floor_int = [] (float val) { return int (floorf (val )); };
auto round_int = [] (float val) { return int (floorf (val + 0.5f)); };
if (!cache)
return;
@ -231,9 +279,34 @@ void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
const AudioColorScheme *pal = &colors[style];
/// @todo Make minband and maxband configurable
int minband = 0;
int maxband = 1 << derivation_size;
// Sampling rate, in Hz.
const float sample_rate = float (provider->GetSampleRate ());
// Number of FFT bins, excluding the "Nyquist" one
const int nbr_bins = 1 << derivation_size;
// minband and maxband define an half-open range.
int minband = 1; // Starts at 1, we don't care about showing the DC.
int maxband = std::min (
round_int (nbr_bins * max_freq / (sample_rate * 0.5f)),
nbr_bins
);
assert (minband < maxband);
// Precompute this once; it will be used below for the log curve.
const float scale_log = logf (maxband / minband);
// Turns the user-specified 1 kHz position into a ratio between the linear
// and logarithmic curves that we can directly use in the following
// calculations.
assert (pos_fref > 0);
assert (pos_fref < 1);
float b_fref = nbr_bins * freq_ref / (sample_rate * 0.5f);
b_fref = mid (1.f, b_fref, float (maxband - 1));
const float clin = minband + (maxband - minband) * pos_fref;
const float clog = minband * expf (pos_fref * scale_log);
float log_ratio_calc = (b_fref - clin) / (clog - clin);
log_ratio_calc = mid (0.f, log_ratio_calc, 1.f);
// ax = absolute x, absolute to the virtual spectrum bitmap
for (int ax = start; ax < end; ++ax)
@ -245,36 +318,51 @@ void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
// Prepare bitmap writing
unsigned char *px = imgdata + (imgheight-1) * stride + (ax - start) * 3;
// Scale up or down vertically?
if (imgheight > 1<<derivation_size)
float bin_prv = minband;
float bin_cur = minband;
for (int y = 0; y < imgheight; ++y)
{
// Interpolate
for (int y = 0; y < imgheight; ++y)
assert (bin_cur < float (maxband));
float bin_nxt = maxband;
if (y + 1 < imgheight)
{
assert(px >= imgdata);
assert(px < imgdata + imgheight*stride);
auto ideal = (double)(y+1.)/imgheight * (maxband-minband) + minband;
float sample1 = power[(int)floor(ideal)+minband];
float sample2 = power[(int)ceil(ideal)+minband];
float frac = ideal - floor(ideal);
float val = (1-frac)*sample1 + frac*sample2;
pal->map(val*amplitude_scale, px);
px -= stride;
// Bin index is an interpolation between the linear and log curves.
const float pos_rel = float (y + 1) / float (imgheight);
const float b_lin = minband + pos_rel * (maxband - minband);
const float b_log = minband * expf (pos_rel * scale_log);
bin_nxt = b_lin + log_ratio_calc * (b_log - b_lin);
}
}
else
{
// Pick greatest
for (int y = 0; y < imgheight; ++y)
float val = 0;
// Interpolate between consecutive bins
if (bin_nxt - bin_prv < 2)
{
assert(px >= imgdata);
assert(px < imgdata + imgheight*stride);
int sample1 = std::max(0, maxband * y/imgheight + minband);
int sample2 = std::min((1<<derivation_size)-1, maxband * (y+1)/imgheight + minband);
float maxval = *std::max_element(&power[sample1], &power[sample2 + 1]);
pal->map(maxval*amplitude_scale, px);
px -= stride;
const int bin_0 = floor_int (bin_cur);
const int bin_1 = std::min (bin_0 + 1, nbr_bins - 1);
const float frac = bin_cur - float (bin_0);
const float v0 = power [bin_0];
const float v1 = power [bin_1];
val = v0 + frac * (v1 - v0);
}
// Pick the greatest bin on the interval
else
{
int bin_inf = floor_int ((bin_prv + bin_cur) * 0.5f);
int bin_sup = floor_int ((bin_cur + bin_nxt) * 0.5f);
bin_inf = std::min (bin_inf, nbr_bins - 2);
bin_sup = std::min (bin_sup, nbr_bins - 1);
assert (bin_inf < bin_sup);
val = *std::max_element (&power [bin_inf], &power [bin_sup]);
}
pal->map (val * amplitude_scale, px);
px -= stride;
bin_prv = bin_cur;
bin_cur = bin_nxt;
}
}

Some files were not shown because too many files have changed in this diff.