mirror of https://github.com/odrling/Aegisub
Don't block the UI while decoding audio for the caches
Simply zero the memory for audio which hasn't been decoded yet, modify the audio renderer to avoid caching blocks which aren't ready yet, and add a progress indicator to the audio display scrollbar.
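At a glance, both cache providers changed below follow the same pattern: a background thread fills the cache and advances an atomic decoded-sample counter, while FillBuffer copies only the samples that are already decoded and zero-fills the rest. The following is a minimal sketch of that pattern, not the actual Aegisub code; the names CacheSketch, cache and bytes_per_sample are illustrative assumptions.

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>

// Sketch of the non-blocking cache pattern: a decode thread fills the backing
// store and advances decoded_samples; readers never wait for it.
// CacheSketch, cache and bytes_per_sample are illustrative, not Aegisub API.
struct CacheSketch {
	std::atomic<int64_t> decoded_samples{0}; // advanced by the decode thread
	int bytes_per_sample = 2;
	const char *cache = nullptr;             // backing store the decoder writes into

	void FillBuffer(void *buf, int64_t start, int64_t count) const {
		// Anything at or past decoded_samples is not ready yet: zero that tail.
		int64_t missing = std::min(count, start + count - decoded_samples.load());
		if (missing > 0) {
			std::memset(static_cast<char *>(buf) + (count - missing) * bytes_per_sample,
			            0, missing * bytes_per_sample);
			count -= missing;
		}
		// The rest has already been decoded and can be copied straight from the cache.
		if (count > 0)
			std::memcpy(buf, cache + start * bytes_per_sample, count * bytes_per_sample);
	}
};

The scrollbar progress marker is driven by the same counter: the display polls GetDecodedSamples() on a timer and repaints, as the diff below shows.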
This commit is contained in:
parent 7dfd494a46
commit a30d6121fd
@@ -151,10 +151,10 @@ temp_file_mapping::temp_file_mapping(fs::path const& filename, uint64_t size)
 temp_file_mapping::~temp_file_mapping() { }

 const char *temp_file_mapping::read(int64_t offset, uint64_t length) {
-	return write(offset, length);
+	return map(offset, length, read_only, file_size, file, read_region, read_mapping_start);
 }

 char *temp_file_mapping::write(int64_t offset, uint64_t length) {
-	return map(offset, length, read_write, file_size, file, region, mapping_start);
+	return map(offset, length, read_write, file_size, file, write_region, write_mapping_start);
 }
 }
@@ -49,10 +49,13 @@ namespace agi {
 class temp_file_mapping {
 	file_mapping file;
-	std::unique_ptr<boost::interprocess::mapped_region> region;
-	uint64_t mapping_start = 0;
 	uint64_t file_size = 0;

+	std::unique_ptr<boost::interprocess::mapped_region> read_region;
+	uint64_t read_mapping_start = 0;
+	std::unique_ptr<boost::interprocess::mapped_region> write_region;
+	uint64_t write_mapping_start = 0;
+
 public:
 	temp_file_mapping(fs::path const& filename, uint64_t size);
 	~temp_file_mapping();
@@ -44,6 +44,7 @@
 #include "audio_timing.h"
 #include "block_cache.h"
 #include "compat.h"
+#include "include/aegisub/audio_provider.h"
 #include "include/aegisub/context.h"
 #include "include/aegisub/hotkey.h"
 #include "options.h"
@@ -59,6 +60,7 @@
 #include <wx/dcclient.h>
 #include <wx/mousestate.h>

+namespace {
 /// @brief Colourscheme-based UI colour provider
 ///
 /// This class provides UI colours corresponding to the supplied audio colour
@@ -74,11 +76,8 @@ class UIColours {
 	wxColour dark_focused_colour; ///< Dark focused colour from the colour scheme
 	wxColour sel_focused_colour; ///< Selection focused colour from the colour scheme

-	bool focused; ///< Use the focused colours?
+	bool focused = false; ///< Use the focused colours?
 public:
-	/// Constructor
-	UIColours() : focused(false) { }
-
 	/// Set the colour scheme to load colours from
 	/// @param name Name of the colour scheme
 	void SetColourScheme(std::string const& name)
@@ -113,14 +112,14 @@ class AudioDisplayScrollbar final : public AudioDisplayInteractionObject {
 	wxRect bounds;
 	wxRect thumb;

-	bool dragging; ///< user is dragging with the primary mouse button
+	bool dragging = false; ///< user is dragging with the primary mouse button

-	int data_length; ///< total amount of data in control
-	int page_length; ///< amount of data in one page
-	int position; ///< first item displayed
+	int data_length = 1; ///< total amount of data in control
+	int page_length = 1; ///< amount of data in one page
+	int position = 0; ///< first item displayed

-	int sel_start; ///< first data item in selection
-	int sel_length; ///< number of data items in selection
+	int sel_start = -1; ///< first data item in selection
+	int sel_length = 0; ///< number of data items in selection

 	UIColours colours; ///< Colour provider
@@ -138,13 +137,7 @@ class AudioDisplayScrollbar final : public AudioDisplayInteractionObject {

 public:
 	AudioDisplayScrollbar(AudioDisplay *display)
-	: dragging(false)
-	, data_length(1)
-	, page_length(1)
-	, position(0)
-	, sel_start(-1)
-	, sel_length(0)
-	, display(display)
+	: display(display)
 	{
 	}

@@ -217,7 +210,7 @@ public:
 		return dragging;
 	}

-	void Paint(wxDC &dc, bool has_focus)
+	void Paint(wxDC &dc, bool has_focus, int load_progress)
 	{
 		colours.SetFocused(has_focus);

@@ -236,6 +229,14 @@ public:
 		dc.SetBrush(*wxTRANSPARENT_BRUSH);
 		dc.DrawRectangle(bounds);

+		if (load_progress > 0 && load_progress < data_length)
+		{
+			wxRect marker(
+				(int64_t)bounds.width * load_progress / data_length - 25, bounds.y + 1,
+				25, bounds.height - 2);
+			dc.GradientFillLinear(marker, colours.Dark(), colours.Light());
+		}
+
 		dc.SetPen(wxPen(colours.Light()));
 		dc.SetBrush(wxBrush(colours.Light()));
 		dc.DrawRectangle(thumb);
@@ -245,14 +246,14 @@ public:
 const int AudioDisplayScrollbar::min_width;

 class AudioDisplayTimeline final : public AudioDisplayInteractionObject {
-	int duration; ///< Total duration in ms
-	double ms_per_pixel; ///< Milliseconds per pixel
-	int pixel_left; ///< Leftmost visible pixel (i.e. scroll position)
+	int duration = 0; ///< Total duration in ms
+	double ms_per_pixel = 1.0; ///< Milliseconds per pixel
+	int pixel_left = 0; ///< Leftmost visible pixel (i.e. scroll position)

 	wxRect bounds;

 	wxPoint drag_lastpos;
-	bool dragging;
+	bool dragging = false;

 	enum Scale {
 		Sc_Millisecond,
@@ -276,11 +277,7 @@ class AudioDisplayTimeline final : public AudioDisplayInteractionObject {

 public:
 	AudioDisplayTimeline(AudioDisplay *display)
-	: duration(0)
-	, ms_per_pixel(1.0)
-	, pixel_left(0)
-	, dragging(false)
-	, display(display)
+	: display(display)
 	{
 		int width, height;
 		display->GetTextExtent("0123456789:.", &width, &height);
@@ -457,48 +454,6 @@ public:
 	}
 };

-class AudioMarkerInteractionObject final : public AudioDisplayInteractionObject {
-	// Object-pair being interacted with
-	std::vector<AudioMarker*> markers;
-	AudioTimingController *timing_controller;
-	// Audio display drag is happening on
-	AudioDisplay *display;
-	// Mouse button used to initiate the drag
-	wxMouseButton button_used;
-	// Default to snapping to snappable markers
-	bool default_snap;
-	// Range in pixels to snap at
-	int snap_range;
-
-public:
-	AudioMarkerInteractionObject(std::vector<AudioMarker*> markers, AudioTimingController *timing_controller, AudioDisplay *display, wxMouseButton button_used)
-	: markers(std::move(markers))
-	, timing_controller(timing_controller)
-	, display(display)
-	, button_used(button_used)
-	, default_snap(OPT_GET("Audio/Snap/Enable")->GetBool())
-	, snap_range(OPT_GET("Audio/Snap/Distance")->GetInt())
-	{
-	}
-
-	bool OnMouseEvent(wxMouseEvent &event) override
-	{
-		if (event.Dragging())
-		{
-			timing_controller->OnMarkerDrag(
-				markers,
-				display->TimeFromRelativeX(event.GetPosition().x),
-				default_snap != event.ShiftDown() ? display->TimeFromAbsoluteX(snap_range) : 0);
-		}
-
-		// We lose the marker drag if the button used to initiate it goes up
-		return !event.ButtonUp(button_used);
-	}
-
-	/// Get the position in milliseconds of this group of markers
-	int GetPosition() const { return markers.front()->GetPosition(); }
-};
-
 class AudioStyleRangeMerger final : public AudioRenderingStyleRanges {
 	typedef std::map<int, AudioRenderingStyle> style_map;
 public:
@@ -548,6 +503,50 @@ public:
 	iterator end() { return points.end(); }
 };

+}
+
+class AudioMarkerInteractionObject final : public AudioDisplayInteractionObject {
+	// Object-pair being interacted with
+	std::vector<AudioMarker*> markers;
+	AudioTimingController *timing_controller;
+	// Audio display drag is happening on
+	AudioDisplay *display;
+	// Mouse button used to initiate the drag
+	wxMouseButton button_used;
+	// Default to snapping to snappable markers
+	bool default_snap;
+	// Range in pixels to snap at
+	int snap_range;
+
+public:
+	AudioMarkerInteractionObject(std::vector<AudioMarker*> markers, AudioTimingController *timing_controller, AudioDisplay *display, wxMouseButton button_used)
+	: markers(std::move(markers))
+	, timing_controller(timing_controller)
+	, display(display)
+	, button_used(button_used)
+	, default_snap(OPT_GET("Audio/Snap/Enable")->GetBool())
+	, snap_range(OPT_GET("Audio/Snap/Distance")->GetInt())
+	{
+	}
+
+	bool OnMouseEvent(wxMouseEvent &event) override
+	{
+		if (event.Dragging())
+		{
+			timing_controller->OnMarkerDrag(
+				markers,
+				display->TimeFromRelativeX(event.GetPosition().x),
+				default_snap != event.ShiftDown() ? display->TimeFromAbsoluteX(snap_range) : 0);
+		}
+
+		// We lose the marker drag if the button used to initiate it goes up
+		return !event.ButtonUp(button_used);
+	}
+
+	/// Get the position in milliseconds of this group of markers
+	int GetPosition() const { return markers.front()->GetPosition(); }
+};
+
 AudioDisplay::AudioDisplay(wxWindow *parent, AudioController *controller, agi::Context *context)
 : wxWindow(parent, -1, wxDefaultPosition, wxDefaultSize, wxWANTS_CHARS|wxBORDER_SIMPLE)
 , audio_open_connection(controller->AddAudioOpenListener(&AudioDisplay::OnAudioOpen, this))
@@ -582,6 +581,7 @@ AudioDisplay::AudioDisplay(wxWindow *parent, AudioController *controller, agi::Context *context)
 	Bind(wxEVT_CHAR_HOOK, &AudioDisplay::OnKeyDown, this);
 	Bind(wxEVT_KEY_DOWN, &AudioDisplay::OnKeyDown, this);
 	scroll_timer.Bind(wxEVT_TIMER, &AudioDisplay::OnScrollTimer, this);
+	load_timer.Bind(wxEVT_TIMER, &AudioDisplay::OnLoadTimer, this);
 }

 AudioDisplay::~AudioDisplay()
@@ -754,9 +754,43 @@ void AudioDisplay::ReloadRenderingSettings()
 	Refresh();
 }

+void AudioDisplay::OnLoadTimer(wxTimerEvent&)
+{
+	using namespace std::chrono;
+	if (provider)
+	{
+		const auto now = steady_clock::now();
+		const auto elapsed = duration_cast<milliseconds>(now - audio_load_start_time).count();
+		if (elapsed == 0) return;
+
+		const int64_t new_decoded_count = provider->GetDecodedSamples();
+		if (new_decoded_count != last_sample_decoded)
+			audio_load_speed = (audio_load_speed + (double)new_decoded_count / elapsed) / 2;
+		if (audio_load_speed == 0) return;
+
+		int new_pos = AbsoluteXFromTime(elapsed * audio_load_speed * 1000.0 / provider->GetSampleRate());
+		if (new_pos > audio_load_position)
+			audio_load_position = new_pos;
+
+		const double left = last_sample_decoded * 1000.0 / provider->GetSampleRate() / ms_per_pixel;
+		const double right = new_decoded_count * 1000.0 / provider->GetSampleRate() / ms_per_pixel;
+
+		if (left < scroll_left + pixel_audio_width && right >= scroll_left)
+			Refresh();
+		else
+			RefreshRect(scrollbar->GetBounds());
+		last_sample_decoded = new_decoded_count;
+	}
+
+	if (!provider || last_sample_decoded == provider->GetNumSamples()) {
+		load_timer.Stop();
+		audio_load_position = -1;
+	}
+}
+
 void AudioDisplay::OnPaint(wxPaintEvent&)
 {
-	if (!audio_renderer_provider) return;
+	if (!audio_renderer_provider || !provider) return;

 	wxAutoBufferedPaintDC dc(this);

@@ -787,7 +821,7 @@ void AudioDisplay::OnPaint(wxPaintEvent&)
 		PaintTrackCursor(dc);

 	if (redraw_scrollbar)
-		scrollbar->Paint(dc, HasFocus());
+		scrollbar->Paint(dc, HasFocus(), audio_load_position);
 	if (redraw_timeline)
 		timeline->Paint(dc);
 }
@@ -1142,6 +1176,8 @@ void AudioDisplay::OnFocus(wxFocusEvent &)

 void AudioDisplay::OnAudioOpen(AudioProvider *provider)
 {
+	this->provider = provider;
+
 	if (!audio_renderer_provider)
 		ReloadRenderingSettings();

@@ -1171,6 +1207,13 @@ void AudioDisplay::OnAudioOpen(AudioProvider *provider)

 			OnTimingController();
 		}
+
+		last_sample_decoded = provider->GetDecodedSamples();
+		audio_load_position = -1;
+		audio_load_speed = 0;
+		audio_load_start_time = std::chrono::steady_clock::now();
+		if (last_sample_decoded != provider->GetNumSamples())
+			load_timer.Start(100);
 	}
 	else
 	{
@@ -33,6 +33,7 @@
 /// @ingroup audio_ui
 ///

+#include <chrono>
 #include <cstdint>
 #include <deque>
 #include <map>
@@ -55,9 +56,11 @@ class AudioProvider;
 class TimeRange;

 // Helper classes used in implementation of the audio display
-class AudioDisplayScrollbar;
-class AudioDisplayTimeline;
-class AudioDisplaySelection;
+namespace {
+class AudioDisplayScrollbar;
+class AudioDisplayTimeline;
+class AudioDisplaySelection;
+}
 class AudioMarkerInteractionObject;

 /// @class AudioDisplayInteractionObject
@@ -91,7 +94,6 @@ public:
 	virtual ~AudioDisplayInteractionObject() { }
 };

-
 /// @class AudioDisplay
 /// @brief Primary view/UI for interaction with audio timing
 ///
@@ -111,7 +113,9 @@ class AudioDisplay: public wxWindow {
 	std::unique_ptr<AudioRendererBitmapProvider> audio_renderer_provider;

 	/// The controller managing us
-	AudioController *controller;
+	AudioController *controller = nullptr;
+
+	AudioProvider *provider = nullptr;

 	/// Scrollbar helper object
 	std::unique_ptr<AudioDisplayScrollbar> scrollbar;
@@ -132,6 +136,15 @@ class AudioDisplay: public wxWindow {
 	/// Timer for scrolling when markers are dragged out of the displayed area
 	wxTimer scroll_timer;

+	wxTimer load_timer;
+	int64_t last_sample_decoded = 0;
+	/// Time at which audio loading began, for calculating loading speed
+	std::chrono::steady_clock::time_point audio_load_start_time;
+	/// Estimated speed of audio decoding in samples per ms
+	double audio_load_speed = 0.0;
+	/// Current position of the audio loading progress in absolute pixels
+	int audio_load_position = 0;
+
 	/// Leftmost pixel in the virtual audio image being displayed
 	int scroll_left = 0;

@@ -219,6 +232,7 @@ class AudioDisplay: public wxWindow {
 	/// wxWidgets keypress event
 	void OnKeyDown(wxKeyEvent& event);
 	void OnScrollTimer(wxTimerEvent &event);
+	void OnLoadTimer(wxTimerEvent &);
 	void OnMouseEnter(wxMouseEvent&);
 	void OnMouseLeave(wxMouseEvent&);

@@ -108,8 +108,8 @@ std::unique_ptr<AudioProvider> CreateFFmpegSourceAudioProvider(agi::fs::path con

 std::unique_ptr<AudioProvider> CreateConvertAudioProvider(std::unique_ptr<AudioProvider> source_provider);
 std::unique_ptr<AudioProvider> CreateLockAudioProvider(std::unique_ptr<AudioProvider> source_provider);
-std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> source_provider, agi::BackgroundRunner *br);
-std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> source_provider, agi::BackgroundRunner *br);
+std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> source_provider);
+std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> source_provider);

 namespace {
 struct factory {
@@ -187,10 +187,10 @@ std::unique_ptr<AudioProvider> AudioProviderFactory::GetProvider(agi::fs::path c
 		return CreateLockAudioProvider(std::move(provider));

 	// Convert to RAM
-	if (cache == 1) return CreateRAMAudioProvider(std::move(provider), br);
+	if (cache == 1) return CreateRAMAudioProvider(std::move(provider));

 	// Convert to HD
-	if (cache == 2) return CreateHDAudioProvider(std::move(provider), br);
+	if (cache == 2) return CreateHDAudioProvider(std::move(provider));

 	throw agi::AudioCacheOpenError("Unknown caching method", nullptr);
 }
@@ -132,7 +132,7 @@ void AvisynthAudioProvider::LoadFromClip(AVSValue clip) {

 	// Read properties
 	channels = vi.AudioChannels();
-	num_samples = vi.num_audio_samples;
+	decoded_samples = num_samples = vi.num_audio_samples;
 	sample_rate = vi.SamplesPerSecond();
 	bytes_per_sample = vi.BytesPerAudioSample();
 	float_samples = false;
@@ -143,6 +143,7 @@ public:

 		sample_rate *= 2;
 		num_samples *= 2;
+		decoded_samples = decoded_samples * 2;
 	}

 	void FillBuffer(void *buf, int64_t start, int64_t count) const override {
@@ -81,7 +81,7 @@ public:
 		sample_rate = 44100;
 		bytes_per_sample = 2;
 		float_samples = false;
-		num_samples = (int64_t)5*30*60*1000 * sample_rate / 1000;
+		decoded_samples = num_samples = (int64_t)5*30*60*1000 * sample_rate / 1000;
 	}
 };
 }
@@ -173,6 +173,7 @@ void FFmpegSourceAudioProvider::LoadAudio(agi::fs::path const& filename) {
 	channels = AudioInfo.Channels;
 	sample_rate = AudioInfo.SampleRate;
 	num_samples = AudioInfo.NumSamples;
+	decoded_samples = AudioInfo.NumSamples;
 	if (channels <= 0 || sample_rate <= 0 || num_samples <= 0)
 		throw agi::AudioProviderOpenError("sanity check failed, consult your local psychiatrist", nullptr);

@@ -20,7 +20,6 @@
 #include "compat.h"
 #include "options.h"

-#include <libaegisub/background_runner.h>
 #include <libaegisub/file_mapping.h>
 #include <libaegisub/fs.h>
 #include <libaegisub/path.h>
@@ -31,54 +30,67 @@
 #include <boost/filesystem.hpp>
 #include <boost/format.hpp>
 #include <boost/interprocess/detail/os_thread_functions.hpp>
+#include <thread>
 #include <wx/intl.h>

 namespace {
 class HDAudioProvider final : public AudioProviderWrapper {
 	std::unique_ptr<agi::temp_file_mapping> file;
+	std::atomic<bool> cancelled = {false};
+	std::thread decoder;

 	void FillBuffer(void *buf, int64_t start, int64_t count) const override {
-		start *= channels * bytes_per_sample;
-		count *= channels * bytes_per_sample;
-		memcpy(buf, file->read(start, count), count);
+		auto missing = std::min(count, start + count - decoded_samples);
+		if (missing > 0) {
+			memset(static_cast<int16_t*>(buf) + count - missing, 0, missing * bytes_per_sample);
+			count -= missing;
+		}
+
+		if (count > 0) {
+			start *= bytes_per_sample;
+			count *= bytes_per_sample;
+			memcpy(buf, file->read(start, count), count);
+		}
 	}

 public:
-	HDAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br)
+	HDAudioProvider(std::unique_ptr<AudioProvider> src)
 	: AudioProviderWrapper(std::move(src))
 	{
+		decoded_samples = 0;
+
 		auto path = OPT_GET("Audio/Cache/HD/Location")->GetString();
 		if (path == "default")
 			path = "?temp";
 		auto cache_dir = config::path->MakeAbsolute(config::path->Decode(path), "?temp");

-		auto bps = bytes_per_sample * channels;
-
 		// Check free space
-		if ((uint64_t)num_samples * bps > agi::fs::FreeSpace(cache_dir))
+		if ((uint64_t)num_samples * bytes_per_sample > agi::fs::FreeSpace(cache_dir))
 			throw agi::AudioCacheOpenError("Not enough free disk space in " + cache_dir.string() + " to cache the audio", nullptr);

 		auto filename = str(boost::format("audio-%lld-%lld")
 			% (long long)time(nullptr)
 			% (long long)boost::interprocess::ipcdetail::get_current_process_id());

-		file = agi::util::make_unique<agi::temp_file_mapping>(cache_dir / filename, num_samples * bps);
-		br->Run([&] (agi::ProgressSink *ps) {
-			ps->SetTitle(from_wx(_("Load audio")));
-			ps->SetMessage(from_wx(_("Reading to Hard Disk cache")));
-
+		file = agi::util::make_unique<agi::temp_file_mapping>(cache_dir / filename, num_samples * bytes_per_sample);
+		decoder = std::thread([&] {
 			int64_t block = 65536;
 			for (int64_t i = 0; i < num_samples; i += block) {
+				if (cancelled) break;
 				block = std::min(block, num_samples - i);
-				source->GetAudio(file->write(i * bps, block * bps), i, block);
-				ps->SetProgress(i, num_samples);
-				if (ps->IsCancelled()) return;
+				source->GetAudio(file->write(i * bytes_per_sample, block * bytes_per_sample), i, block);
+				decoded_samples += block;
 			}
 		});
 	}
+
+	~HDAudioProvider() {
+		cancelled = true;
+		decoder.join();
+	}
 };
 }

-std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br) {
-	return agi::util::make_unique<HDAudioProvider>(std::move(src), br);
+std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> src) {
+	return agi::util::make_unique<HDAudioProvider>(std::move(src));
 }
@@ -222,6 +222,8 @@ public:
 			data_left -= (ch.size + 1) & ~1;
 			filepos += (ch.size + 1) & ~1;
 		}
+
+		decoded_samples = num_samples;
 	}
 };

@@ -353,6 +355,8 @@ public:
 			data_left -= (chunk_size + 7) & ~7;
 			filepos += (chunk_size + 7) & ~7;
 		}
+
+		decoded_samples = num_samples;
 	}
 };

@@ -37,11 +37,11 @@
 #include "audio_controller.h"
 #include "compat.h"

-#include <libaegisub/background_runner.h>
 #include <libaegisub/util.h>

 #include <array>
 #include <boost/container/stable_vector.hpp>
+#include <thread>
 #include <wx/intl.h>

 namespace {
@@ -51,17 +51,21 @@ namespace {

 class RAMAudioProvider final : public AudioProviderWrapper {
 #ifdef _MSC_VER
-	boost::container::stable_vector<char[1 << 22]> blockcache;
+	boost::container::stable_vector<char[CacheBlockSize]> blockcache;
 #else
-	boost::container::stable_vector<std::array<char, 1 << 22>> blockcache;
+	boost::container::stable_vector<std::array<char, CacheBlockSize>> blockcache;
 #endif
+	std::atomic<bool> cancelled = {false};
+	std::thread decoder;

 	void FillBuffer(void *buf, int64_t start, int64_t count) const override;

 public:
-	RAMAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br)
+	RAMAudioProvider(std::unique_ptr<AudioProvider> src)
 	: AudioProviderWrapper(std::move(src))
 	{
+		decoded_samples = 0;
+
 		try {
 			blockcache.resize((source->GetNumSamples() * source->GetBytesPerSample() + CacheBlockSize - 1) >> CacheBits);
 		}
@@ -69,39 +73,44 @@ public:
 			throw agi::AudioCacheOpenError("Couldn't open audio, not enough ram available.", nullptr);
 		}

-		br->Run([&](agi::ProgressSink *ps) {
-			ps->SetTitle(from_wx(_("Load audio")));
-			ps->SetMessage(from_wx(_("Reading into RAM")));
-
+		decoder = std::thread([&] {
 			int64_t readsize = CacheBlockSize / source->GetBytesPerSample();
 			for (size_t i = 0; i < blockcache.size(); i++) {
-				if (ps->IsCancelled()) return;
-				ps->SetProgress(i + 1, blockcache.size());
-				source->GetAudio(&blockcache[i][0], i * readsize, std::min<int64_t>(readsize, num_samples - i * readsize));
+				if (cancelled) break;
+				auto actual_read = std::min<int64_t>(readsize, num_samples - i * readsize);
+				source->GetAudio(&blockcache[i][0], i * readsize, actual_read);
+				decoded_samples += actual_read;
 			}
 		});
 	}
+
+	~RAMAudioProvider() {
+		cancelled = true;
+		decoder.join();
+	}
 };

 void RAMAudioProvider::FillBuffer(void *buf, int64_t start, int64_t count) const {
 	char *charbuf = static_cast<char *>(buf);
-	int i = (start * bytes_per_sample) >> CacheBits;
-	int start_offset = (start * bytes_per_sample) & (CacheBlockSize-1);
-	int64_t bytesremaining = count * bytes_per_sample;
-
-	while (bytesremaining) {
-		int readsize = std::min<int>(bytesremaining, CacheBlockSize - start_offset);
-
-		memcpy(charbuf, &blockcache[i++][start_offset], readsize);
-
-		charbuf += readsize;
-
+	for (int64_t bytes_remaining = count * bytes_per_sample; bytes_remaining; ) {
+		if (start >= decoded_samples) {
+			memset(charbuf, 0, bytes_remaining);
+			break;
+		}
+
+		int i = (start * bytes_per_sample) >> CacheBits;
+		int start_offset = (start * bytes_per_sample) & (CacheBlockSize-1);
+		int read_size = std::min<int>(bytes_remaining, CacheBlockSize - start_offset);
+
+		memcpy(charbuf, &blockcache[i++][start_offset], read_size);
+		charbuf += read_size;
+
 		start_offset = 0;
-		bytesremaining -= readsize;
+		bytes_remaining -= read_size;
+		start += CacheBlockSize / bytes_per_sample;
 	}
 }
 }

-std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br) {
-	return agi::util::make_unique<RAMAudioProvider>(std::move(src), br);
+std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> src) {
+	return agi::util::make_unique<RAMAudioProvider>(std::move(src));
 }
@@ -43,6 +43,16 @@
 #include <wx/bitmap.h>
 #include <wx/dcmemory.h>

+namespace {
+	template<typename T>
+	bool compare_and_set(T &var, T new_value)
+	{
+		if (var == new_value) return false;
+		var = new_value;
+		return true;
+	}
+}
+
 AudioRendererBitmapCacheBitmapFactory::AudioRendererBitmapCacheBitmapFactory(AudioRenderer *renderer)
 : renderer(renderer)
 {
@@ -71,64 +81,59 @@ AudioRenderer::AudioRenderer()

 void AudioRenderer::SetMillisecondsPerPixel(double new_pixel_ms)
 {
-	if (pixel_ms == new_pixel_ms) return;
-
-	pixel_ms = new_pixel_ms;
-
-	if (renderer)
-		renderer->SetMillisecondsPerPixel(pixel_ms);
-
-	ResetBlockCount();
+	if (compare_and_set(pixel_ms, new_pixel_ms))
+	{
+		if (renderer)
+			renderer->SetMillisecondsPerPixel(pixel_ms);
+
+		ResetBlockCount();
+	}
 }

 void AudioRenderer::SetHeight(int _pixel_height)
 {
-	if (pixel_height == _pixel_height) return;
-
-	pixel_height = _pixel_height;
-	Invalidate();
+	if (compare_and_set(pixel_height, _pixel_height))
+		Invalidate();
 }

 void AudioRenderer::SetAmplitudeScale(float _amplitude_scale)
 {
-	if (amplitude_scale == _amplitude_scale) return;
-
-	// A scaling of 0 or a negative scaling makes no sense
-	assert(_amplitude_scale > 0);
-
-	amplitude_scale = _amplitude_scale;
-
-	if (renderer)
-		renderer->SetAmplitudeScale(amplitude_scale);
-	Invalidate();
+	if (compare_and_set(amplitude_scale, _amplitude_scale))
+	{
+		// A scaling of 0 or a negative scaling makes no sense
+		assert(amplitude_scale > 0);
+		if (renderer)
+			renderer->SetAmplitudeScale(amplitude_scale);
+		Invalidate();
+	}
 }

 void AudioRenderer::SetRenderer(AudioRendererBitmapProvider *_renderer)
 {
-	if (renderer == _renderer) return;
-
-	renderer = _renderer;
-	Invalidate();
-
-	if (renderer)
+	if (compare_and_set(renderer, _renderer))
 	{
-		renderer->SetProvider(provider);
-		renderer->SetAmplitudeScale(amplitude_scale);
-		renderer->SetMillisecondsPerPixel(pixel_ms);
+		Invalidate();
+
+		if (renderer)
+		{
+			renderer->SetProvider(provider);
+			renderer->SetAmplitudeScale(amplitude_scale);
+			renderer->SetMillisecondsPerPixel(pixel_ms);
+		}
 	}
 }

 void AudioRenderer::SetAudioProvider(AudioProvider *_provider)
 {
-	if (provider == _provider) return;
-
-	provider = _provider;
-	Invalidate();
-
-	if (renderer)
-		renderer->SetProvider(provider);
-
-	ResetBlockCount();
+	if (compare_and_set(provider, _provider))
+	{
+		Invalidate();
+
+		if (renderer)
+			renderer->SetProvider(provider);
+
+		ResetBlockCount();
+	}
 }

 void AudioRenderer::SetCacheMaxSize(size_t max_size)
@@ -145,13 +150,17 @@ void AudioRenderer::ResetBlockCount()
 {
 	if (provider)
 	{
-		double duration = provider->GetNumSamples() * 1000.0 / provider->GetSampleRate();
-		size_t rendered_width = (size_t)ceil(duration / pixel_ms);
-		cache_numblocks = rendered_width / cache_bitmap_width;
-		for (auto& bmp : bitmaps) bmp.SetBlockCount(cache_numblocks);
+		const size_t total_blocks = NumBlocks(provider->GetNumSamples());
+		for (auto& bmp : bitmaps) bmp.SetBlockCount(total_blocks);
 	}
 }

+size_t AudioRenderer::NumBlocks(const int64_t samples) const
+{
+	const double duration = samples * 1000.0 / provider->GetSampleRate();
+	return static_cast<size_t>(duration / pixel_ms / cache_bitmap_width);
+}
+
 const wxBitmap *AudioRenderer::GetCachedBitmap(int i, AudioRenderingStyle style)
 {
 	assert(provider);
@@ -187,7 +196,7 @@ void AudioRenderer::Render(wxDC &dc, wxPoint origin, int start, int length, Audi
 	// And the offset in it to start its use at
 	int firstbitmapoffset = start % cache_bitmap_width;
 	// The last bitmap required
-	int lastbitmap = std::min<int>(end / cache_bitmap_width, cache_numblocks - 1);
+	int lastbitmap = std::min<int>(end / cache_bitmap_width, NumBlocks(provider->GetDecodedSamples()) - 1);

 	// Set a clipping region so that the first and last bitmaps don't draw
 	// outside the requested range
@@ -202,9 +211,7 @@ void AudioRenderer::Render(wxDC &dc, wxPoint origin, int start, int length, Audi

 	// Now render blank audio from origin to end
 	if (origin.x < lastx)
-	{
 		renderer->RenderBlank(dc, wxRect(origin.x-1, origin.y, lastx-origin.x+1, pixel_height), style);
-	}

 	if (needs_age)
 	{
@@ -222,27 +229,18 @@ void AudioRenderer::Invalidate()

 void AudioRendererBitmapProvider::SetProvider(AudioProvider *_provider)
 {
-	if (provider == _provider) return;
-
-	provider = _provider;
-
-	OnSetProvider();
+	if (compare_and_set(provider, _provider))
+		OnSetProvider();
 }

 void AudioRendererBitmapProvider::SetMillisecondsPerPixel(double new_pixel_ms)
 {
-	if (pixel_ms == new_pixel_ms) return;
-
-	pixel_ms = new_pixel_ms;
-
-	OnSetMillisecondsPerPixel();
+	if (compare_and_set(pixel_ms, new_pixel_ms))
+		OnSetMillisecondsPerPixel();
 }

 void AudioRendererBitmapProvider::SetAmplitudeScale(float _amplitude_scale)
 {
-	if (amplitude_scale == _amplitude_scale) return;
-
-	amplitude_scale = _amplitude_scale;
-
-	OnSetAmplitudeScale();
+	if (compare_and_set(amplitude_scale, _amplitude_scale))
+		OnSetAmplitudeScale();
 }
@@ -100,8 +100,6 @@ class AudioRenderer {

 	/// Cached bitmaps for audio ranges
 	std::vector<AudioRendererBitmapCache> bitmaps;
-	/// Number of blocks in the bitmap caches
-	size_t cache_numblocks = 0;
 	/// The maximum allowed size of each bitmap cache, in bytes
 	size_t cache_bitmap_maxsize = 0;
 	/// The maximum allowed size of the renderer's cache, in bytes
@@ -131,11 +129,14 @@ class AudioRenderer {
 	/// has changed.
 	void ResetBlockCount();

+	/// Calculate the number of cache blocks needed for a given number of samples
+	size_t NumBlocks(int64_t samples) const;
+
 public:
 	/// @brief Constructor
 	///
-	/// Initialises audio rendering to a do-nothing state. An audio provider and bitmap
-	/// provider must be set before the audio renderer is functional.
+	/// Initialises audio rendering to a do-nothing state. An audio provider
+	/// and bitmap provider must be set before the audio renderer is functional.
 	AudioRenderer();

 	/// @brief Set horizontal zoom
@@ -37,6 +37,7 @@
 #include <libaegisub/exception.h>
 #include <libaegisub/fs_fwd.h>

+#include <atomic>
 #include <boost/filesystem/path.hpp>

 class AudioProvider {
@@ -45,6 +46,7 @@ protected:

 	/// for one channel, ie. number of PCM frames
 	int64_t num_samples;
+	std::atomic<int64_t> decoded_samples;
 	int sample_rate;
 	int bytes_per_sample;
 	bool float_samples;
@@ -62,6 +64,7 @@ public:

 	agi::fs::path GetFilename() const { return filename; }
 	int64_t GetNumSamples() const { return num_samples; }
+	int64_t GetDecodedSamples() const { return decoded_samples; }
 	int GetSampleRate() const { return sample_rate; }
 	int GetBytesPerSample() const { return bytes_per_sample; }
 	int GetChannels() const { return channels; }
@@ -81,6 +84,7 @@ public:
 	{
 		channels = source->GetChannels();
 		num_samples = source->GetNumSamples();
+		decoded_samples = source->GetDecodedSamples();
 		sample_rate = source->GetSampleRate();
 		bytes_per_sample = source->GetBytesPerSample();
 		float_samples = source->AreSamplesFloat();