LibMedia: Play audio through PlaybackManager using Providers/Sinks

This commit implements audio playback through PlaybackManager.

To decode the audio data, an AudioDataProvider is created for each audio
track in the provided media. Each provider fills its audio block queue,
then sits idle until its corresponding track is enabled.

To output the audio, a single AudioMixingSink is created. It manages a
PlaybackStream that requests audio blocks from the enabled
AudioDataProviders and mixes them into one output buffer with
sample-accurate precision.
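
For illustration only (not part of this commit), a caller holding a
SeekableStream might enable the preferred audio track roughly as in the
sketch below. The enclosing function is assumed to return a
DecoderErrorOr so that TRY can propagate errors, and `stream` is a
hypothetical NonnullOwnPtr<SeekableStream>.

// Hypothetical caller-side sketch; `stream` is an assumed NonnullOwnPtr<SeekableStream>.
auto manager = TRY(PlaybackManager::try_create(move(stream)));
auto audio_track = manager->preferred_audio_track();
if (audio_track.has_value()) {
    // Attaches the track's AudioDataProvider to the AudioMixingSink and begins mixing it.
    manager->enable_an_audio_track(audio_track.value());
}
// Later, to stop mixing this track into the output:
if (audio_track.has_value())
    manager->disable_an_audio_track(audio_track.value());
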
Authored by Zaggy1024 on 2025-09-24 13:40:17 -05:00; committed by Jelle Raaijmakers
parent dd052832c1
commit 0ff330c906
14 changed files with 1031 additions and 5 deletions

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/FixedArray.h>
#include <AK/Time.h>
namespace Media {
class AudioBlock {
public:
using Data = FixedArray<float>;
u32 sample_rate() const { return m_sample_rate; }
u8 channel_count() const { return m_channel_count; }
AK::Duration start_timestamp() const { return m_start_timestamp; }
Data& data() { return m_data; }
Data const& data() const { return m_data; }
void clear()
{
m_sample_rate = 0;
m_channel_count = 0;
m_start_timestamp = AK::Duration::zero();
m_data = Data();
}
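// Populates an empty block; the callback receives the block's data array to allocate and fill with interleaved float samples.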
template<typename Callback>
void emplace(u32 sample_rate, u8 channel_count, AK::Duration start_timestamp, Callback data_callback)
{
VERIFY(sample_rate != 0);
VERIFY(channel_count != 0);
VERIFY(m_data.is_empty());
m_sample_rate = sample_rate;
m_channel_count = channel_count;
m_start_timestamp = start_timestamp;
data_callback(m_data);
}
bool is_empty() const
{
return m_sample_rate == 0;
}
size_t data_count() const
{
return data().size();
}
size_t sample_count() const
{
return data_count() / m_channel_count;
}
private:
u32 m_sample_rate { 0 };
u8 m_channel_count { 0 };
AK::Duration m_start_timestamp;
Data m_data;
};
}

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/ByteBuffer.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/Time.h>
#include <AK/Vector.h>
#include <LibMedia/AudioBlock.h>
#include <LibMedia/DecoderError.h>
namespace Media {
class AudioDecoder {
public:
virtual ~AudioDecoder() { }
virtual DecoderErrorOr<void> receive_coded_data(AK::Duration timestamp, ReadonlyBytes coded_data) = 0;
// Writes the next batch of decoded audio samples to the provided block, or returns a NeedsMoreInput error if none are buffered.
virtual DecoderErrorOr<void> write_next_block(AudioBlock&) = 0;
virtual void flush() = 0;
};
}

@@ -11,7 +11,9 @@ set(SOURCES
Containers/Matroska/MatroskaDemuxer.cpp
Containers/Matroska/Reader.cpp
PlaybackManager.cpp
Providers/AudioDataProvider.cpp
Providers/VideoDataProvider.cpp
Sinks/AudioMixingSink.cpp
Sinks/DisplayingVideoSink.cpp
VideoFrame.cpp
)
@@ -21,6 +23,7 @@ target_link_libraries(LibMedia PRIVATE LibCore LibCrypto LibIPC LibGfx LibThread
target_sources(LibMedia PRIVATE
Audio/FFmpegLoader.cpp
FFmpeg/FFmpegAudioDecoder.cpp
FFmpeg/FFmpegDemuxer.cpp
FFmpeg/FFmpegIOContext.cpp
FFmpeg/FFmpegVideoDecoder.cpp

@@ -0,0 +1,230 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <LibCore/System.h>
#include <LibMedia/AudioBlock.h>
#include <LibMedia/FFmpeg/FFmpegHelpers.h>
#include "FFmpegAudioDecoder.h"
namespace Media::FFmpeg {
DecoderErrorOr<NonnullOwnPtr<FFmpegAudioDecoder>> FFmpegAudioDecoder::try_create(CodecID codec_id, ReadonlyBytes codec_initialization_data)
{
AVCodecContext* codec_context = nullptr;
AVPacket* packet = nullptr;
AVFrame* frame = nullptr;
ArmedScopeGuard memory_guard {
[&] {
avcodec_free_context(&codec_context);
av_packet_free(&packet);
av_frame_free(&frame);
}
};
auto ff_codec_id = ffmpeg_codec_id_from_media_codec_id(codec_id);
auto const* codec = avcodec_find_decoder(ff_codec_id);
if (!codec)
return DecoderError::format(DecoderErrorCategory::NotImplemented, "Could not find FFmpeg decoder for codec {}", codec_id);
codec_context = avcodec_alloc_context3(codec);
if (!codec_context)
return DecoderError::format(DecoderErrorCategory::Memory, "Failed to allocate FFmpeg codec context for codec {}", codec_id);
codec_context->time_base = { 1, 1'000'000 };
codec_context->thread_count = static_cast<int>(min(Core::System::hardware_concurrency(), 4));
if (!codec_initialization_data.is_empty()) {
if (codec_initialization_data.size() > NumericLimits<int>::max())
return DecoderError::corrupted("Codec initialization data is too large"sv);
codec_context->extradata = static_cast<u8*>(av_malloc(codec_initialization_data.size() + AV_INPUT_BUFFER_PADDING_SIZE));
if (!codec_context->extradata)
return DecoderError::with_description(DecoderErrorCategory::Memory, "Failed to allocate codec initialization data buffer for FFmpeg codec"sv);
memcpy(codec_context->extradata, codec_initialization_data.data(), codec_initialization_data.size());
codec_context->extradata_size = static_cast<int>(codec_initialization_data.size());
}
if (avcodec_open2(codec_context, codec, nullptr) < 0)
return DecoderError::format(DecoderErrorCategory::Unknown, "Unknown error occurred when opening FFmpeg codec {}", codec_id);
packet = av_packet_alloc();
if (!packet)
return DecoderError::with_description(DecoderErrorCategory::Memory, "Failed to allocate FFmpeg packet"sv);
frame = av_frame_alloc();
if (!frame)
return DecoderError::with_description(DecoderErrorCategory::Memory, "Failed to allocate FFmpeg frame"sv);
memory_guard.disarm();
return DECODER_TRY_ALLOC(try_make<FFmpegAudioDecoder>(codec_context, packet, frame));
}
FFmpegAudioDecoder::FFmpegAudioDecoder(AVCodecContext* codec_context, AVPacket* packet, AVFrame* frame)
: m_codec_context(codec_context)
, m_packet(packet)
, m_frame(frame)
{
}
FFmpegAudioDecoder::~FFmpegAudioDecoder()
{
av_packet_free(&m_packet);
av_frame_free(&m_frame);
avcodec_free_context(&m_codec_context);
}
DecoderErrorOr<void> FFmpegAudioDecoder::receive_coded_data(AK::Duration timestamp, ReadonlyBytes coded_data)
{
VERIFY(coded_data.size() < NumericLimits<int>::max());
m_packet->data = const_cast<u8*>(coded_data.data());
m_packet->size = static_cast<int>(coded_data.size());
m_packet->pts = timestamp.to_microseconds();
m_packet->dts = m_packet->pts;
auto result = avcodec_send_packet(m_codec_context, m_packet);
switch (result) {
case 0:
return {};
case AVERROR(EAGAIN):
return DecoderError::with_description(DecoderErrorCategory::NeedsMoreInput, "FFmpeg decoder cannot decode any more data until frames have been retrieved"sv);
case AVERROR_EOF:
return DecoderError::with_description(DecoderErrorCategory::EndOfStream, "FFmpeg decoder has been flushed"sv);
case AVERROR(EINVAL):
return DecoderError::with_description(DecoderErrorCategory::Invalid, "FFmpeg codec has not been opened"sv);
case AVERROR(ENOMEM):
return DecoderError::with_description(DecoderErrorCategory::Memory, "FFmpeg codec ran out of internal memory"sv);
default:
return DecoderError::with_description(DecoderErrorCategory::Corrupted, "FFmpeg codec reports that the data is corrupted"sv);
}
}
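// Helpers that convert a single sample of each supported FFmpeg sample format to a 32-bit float.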
template<typename T>
static float float_sample_from_frame_data(u8** data, size_t plane, size_t index);
template<>
float float_sample_from_frame_data<u8>(u8** data, size_t plane, size_t index)
{
return static_cast<float>(data[plane][index] - 127) / 255;
}
template<typename T>
requires(IsSigned<T>)
static float float_sample_from_frame_data(u8** data, size_t plane, size_t index)
{
auto* pointer = reinterpret_cast<T*>(data[plane]);
return pointer[index] / static_cast<float>(NumericLimits<T>::max());
}
template<typename T>
requires(IsFloatingPoint<T>)
static float float_sample_from_frame_data(u8** data, size_t plane, size_t index)
{
auto* pointer = reinterpret_cast<T*>(data[plane]);
return pointer[index];
}
DecoderErrorOr<void> FFmpegAudioDecoder::write_next_block(AudioBlock& block)
{
auto result = avcodec_receive_frame(m_codec_context, m_frame);
switch (result) {
case 0: {
auto timestamp = AK::Duration::from_microseconds(m_frame->pts);
if (m_frame->ch_layout.nb_channels > 2)
return DecoderError::not_implemented();
VERIFY(m_frame->sample_rate > 0);
VERIFY(m_frame->ch_layout.nb_channels > 0);
block.emplace(m_frame->sample_rate, m_frame->ch_layout.nb_channels, timestamp, [&](AudioBlock::Data& data) {
auto format = static_cast<AVSampleFormat>(m_frame->format);
auto is_planar = av_sample_fmt_is_planar(format) != 0;
auto planar_format = av_get_planar_sample_fmt(format);
VERIFY(m_frame->nb_samples >= 0);
auto sample_count = static_cast<size_t>(m_frame->nb_samples);
auto channel_count = static_cast<size_t>(m_frame->ch_layout.nb_channels);
auto count = sample_count * channel_count;
data = MUST(AudioBlock::Data::create(count));
auto sample_size = [&] {
switch (planar_format) {
case AV_SAMPLE_FMT_U8P:
return sizeof(u8);
case AV_SAMPLE_FMT_S16P:
return sizeof(i16);
case AV_SAMPLE_FMT_S32P:
return sizeof(i32);
case AV_SAMPLE_FMT_FLTP:
return sizeof(float);
case AV_SAMPLE_FMT_DBLP:
return sizeof(double);
case AV_SAMPLE_FMT_S64P:
return sizeof(i64);
default:
VERIFY_NOT_REACHED();
}
}();
VERIFY(m_frame->linesize[0] > 0);
if (is_planar)
VERIFY(static_cast<size_t>(m_frame->linesize[0]) >= sample_count * sample_size);
else
VERIFY(static_cast<size_t>(m_frame->linesize[0]) >= sample_count * channel_count * sample_size);
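// Copy the frame's samples into the block as interleaved floats, reading from either planar or interleaved source data.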
for (size_t i = 0; i < count; i++) {
size_t plane = 0;
size_t index_in_plane = i;
if (is_planar) {
plane = i % channel_count;
index_in_plane = i / channel_count;
}
auto float_sample = [&] {
switch (planar_format) {
case AV_SAMPLE_FMT_U8P:
return float_sample_from_frame_data<u8>(m_frame->extended_data, plane, index_in_plane);
case AV_SAMPLE_FMT_S16P:
return float_sample_from_frame_data<i16>(m_frame->extended_data, plane, index_in_plane);
case AV_SAMPLE_FMT_S32P:
return float_sample_from_frame_data<i32>(m_frame->extended_data, plane, index_in_plane);
case AV_SAMPLE_FMT_FLTP:
return float_sample_from_frame_data<float>(m_frame->extended_data, plane, index_in_plane);
case AV_SAMPLE_FMT_DBLP:
return float_sample_from_frame_data<double>(m_frame->extended_data, plane, index_in_plane);
case AV_SAMPLE_FMT_S64P:
return float_sample_from_frame_data<i64>(m_frame->extended_data, plane, index_in_plane);
default:
VERIFY_NOT_REACHED();
}
}();
data[i] = float_sample;
}
});
return {};
}
case AVERROR(EAGAIN):
return DecoderError::with_description(DecoderErrorCategory::NeedsMoreInput, "FFmpeg decoder has no frames available, send more input"sv);
case AVERROR_EOF:
return DecoderError::with_description(DecoderErrorCategory::EndOfStream, "FFmpeg decoder has been flushed"sv);
case AVERROR(EINVAL):
return DecoderError::with_description(DecoderErrorCategory::Invalid, "FFmpeg codec has not been opened"sv);
default:
return DecoderError::format(DecoderErrorCategory::Unknown, "FFmpeg codec encountered an unexpected error retrieving frames with code {:x}", result);
}
}
void FFmpegAudioDecoder::flush()
{
avcodec_flush_buffers(m_codec_context);
}
}

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <LibMedia/AudioDecoder.h>
#include <LibMedia/CodecID.h>
#include <LibMedia/Export.h>
#include <LibMedia/FFmpeg/FFmpegForward.h>
namespace Media::FFmpeg {
class MEDIA_API FFmpegAudioDecoder final : public AudioDecoder {
public:
static DecoderErrorOr<NonnullOwnPtr<FFmpegAudioDecoder>> try_create(CodecID, ReadonlyBytes codec_initialization_data);
FFmpegAudioDecoder(AVCodecContext* codec_context, AVPacket* packet, AVFrame* frame);
virtual ~FFmpegAudioDecoder() override;
virtual DecoderErrorOr<void> receive_coded_data(AK::Duration timestamp, ReadonlyBytes coded_data) override;
// Writes the next batch of decoded audio samples to the provided block, or returns a NeedsMoreInput error if none are buffered.
virtual DecoderErrorOr<void> write_next_block(AudioBlock&) override;
virtual void flush() override;
private:
AVCodecContext* m_codec_context;
AVPacket* m_packet;
AVFrame* m_frame;
};
}

@@ -10,6 +10,10 @@
namespace Media {
class CodedFrame;
class AudioDataProvider;
class AudioDecoder;
class AudioMixingSink;
class AudioSink;
class DecoderError;
class Demuxer;
class DisplayingVideoSink;

@@ -6,7 +6,9 @@
#include <LibMedia/FFmpeg/FFmpegDemuxer.h>
#include <LibMedia/MutexedDemuxer.h>
#include <LibMedia/Providers/AudioDataProvider.h>
#include <LibMedia/Providers/VideoDataProvider.h>
#include <LibMedia/Sinks/AudioMixingSink.h>
#include <LibMedia/Sinks/DisplayingVideoSink.h>
#include <LibMedia/Track.h>
@@ -39,20 +41,45 @@ DecoderErrorOr<NonnullRefPtr<PlaybackManager>> PlaybackManager::try_create(Nonnu
supported_video_tracks.shrink_to_fit();
supported_video_track_datas.shrink_to_fit();
if (supported_video_tracks.is_empty())
return DecoderError::with_description(DecoderErrorCategory::NotImplemented, "No supported video tracks found"sv);
// Create all the audio tracks, their data providers, and the audio output.
auto all_audio_tracks = TRY(demuxer->get_tracks_for_type(TrackType::Audio));
auto playback_manager = DECODER_TRY_ALLOC(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackManager(demuxer, weak_playback_manager, move(supported_video_tracks), move(supported_video_track_datas))));
auto supported_audio_tracks = AudioTracks();
auto supported_audio_track_datas = AudioTrackDatas();
supported_audio_tracks.ensure_capacity(all_audio_tracks.size());
supported_audio_track_datas.ensure_capacity(all_audio_tracks.size());
for (auto const& track : all_audio_tracks) {
auto audio_data_provider_result = AudioDataProvider::try_create(demuxer, track);
if (audio_data_provider_result.is_error())
continue;
auto audio_data_provider = audio_data_provider_result.release_value();
supported_audio_tracks.append(track);
supported_audio_track_datas.empend(AudioTrackData(track, move(audio_data_provider)));
}
supported_audio_tracks.shrink_to_fit();
supported_audio_track_datas.shrink_to_fit();
if (supported_video_tracks.is_empty() && supported_audio_tracks.is_empty())
return DecoderError::with_description(DecoderErrorCategory::NotImplemented, "No supported video or audio tracks found"sv);
RefPtr<AudioMixingSink> audio_sink = nullptr;
if (!supported_audio_tracks.is_empty())
audio_sink = DECODER_TRY_ALLOC(AudioMixingSink::try_create());
auto playback_manager = DECODER_TRY_ALLOC(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackManager(demuxer, weak_playback_manager, move(supported_video_tracks), move(supported_video_track_datas), audio_sink, move(supported_audio_tracks), move(supported_audio_track_datas))));
weak_playback_manager->m_manager = playback_manager;
playback_manager->set_up_error_handlers();
return playback_manager;
}
PlaybackManager::PlaybackManager(NonnullRefPtr<MutexedDemuxer> const& demuxer, NonnullRefPtr<WeakPlaybackManager> const& weak_wrapper, VideoTracks&& video_tracks, VideoTrackDatas&& video_track_datas)
PlaybackManager::PlaybackManager(NonnullRefPtr<MutexedDemuxer> const& demuxer, NonnullRefPtr<WeakPlaybackManager> const& weak_wrapper, VideoTracks&& video_tracks, VideoTrackDatas&& video_track_datas, RefPtr<AudioMixingSink> const& audio_sink, AudioTracks&& audio_tracks, AudioTrackDatas&& audio_track_datas)
: m_demuxer(demuxer)
, m_weak_wrapper(weak_wrapper)
, m_video_tracks(video_tracks)
, m_video_track_datas(video_track_datas)
, m_audio_sink(audio_sink)
, m_audio_tracks(audio_tracks)
, m_audio_track_datas(audio_track_datas)
, m_real_time_base(MonotonicTime::now())
{
}
@@ -72,6 +99,15 @@ void PlaybackManager::set_up_error_handlers()
self->dispatch_error(move(error));
});
}
for (auto const& audio_track_data : m_audio_track_datas) {
audio_track_data.provider->set_error_handler([weak_self = m_weak_wrapper](DecoderError&& error) {
auto self = weak_self->take_strong();
if (!self)
return;
self->dispatch_error(move(error));
});
}
}
void PlaybackManager::dispatch_error(DecoderError&& error)
@@ -101,6 +137,14 @@ Optional<Track> PlaybackManager::preferred_video_track()
return result;
}
Optional<Track> PlaybackManager::preferred_audio_track()
{
auto result = m_demuxer->get_preferred_track_for_type(TrackType::Audio).value_or({});
if (result.has_value() && !m_audio_tracks.contains_slow(result.value()))
return {};
return result;
}
PlaybackManager::VideoTrackData& PlaybackManager::get_video_data_for_track(Track const& track)
{
for (auto& track_data : m_video_track_datas) {
@@ -131,4 +175,30 @@ void PlaybackManager::remove_the_displaying_video_sink_for_track(Track const& tr
track_data.display = nullptr;
}
PlaybackManager::AudioTrackData& PlaybackManager::get_audio_data_for_track(Track const& track)
{
for (auto& track_data : m_audio_track_datas) {
if (track_data.track == track)
return track_data;
}
VERIFY_NOT_REACHED();
}
void PlaybackManager::enable_an_audio_track(Track const& track)
{
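// Attach the track's provider to the mixing sink; if the track was not already enabled, start its provider decoding from the current playback position.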
auto& track_data = get_audio_data_for_track(track);
auto had_provider = m_audio_sink->provider(track) != nullptr;
m_audio_sink->set_provider(track, track_data.provider);
if (!had_provider)
track_data.provider->seek(current_time());
}
void PlaybackManager::disable_an_audio_track(Track const& track)
{
auto& track_data = get_audio_data_for_track(track);
VERIFY(track_data.provider == m_audio_sink->provider(track));
m_audio_sink->set_provider(track, nullptr);
}
}

@@ -32,6 +32,10 @@ public:
using VideoTracks = Vector<Track, EXPECTED_VIDEO_TRACK_COUNT>;
static constexpr size_t EXPECTED_AUDIO_TRACK_COUNT = 1;
using AudioTracks = Vector<Track, EXPECTED_AUDIO_TRACK_COUNT>;
static DecoderErrorOr<NonnullRefPtr<PlaybackManager>> try_create(NonnullOwnPtr<SeekableStream>&& stream);
~PlaybackManager();
@@ -41,6 +45,9 @@ public:
VideoTracks const& video_tracks() const { return m_video_tracks; }
Optional<Track> preferred_video_track();
AudioTracks const& audio_tracks() const { return m_audio_tracks; }
Optional<Track> preferred_audio_track();
// Creates a DisplayingVideoSink for the specified track.
//
// Note that in order for the current frame to change based on the media time, users must call
@@ -50,6 +57,9 @@ public:
// retrieving any subsequent frames from the decoder.
void remove_the_displaying_video_sink_for_track(Track const& track);
void enable_an_audio_track(Track const& track);
void disable_an_audio_track(Track const& track);
Function<void(DecoderError&&)> on_error;
private:
@@ -91,12 +101,19 @@ private:
};
using VideoTrackDatas = Vector<VideoTrackData, EXPECTED_VIDEO_TRACK_COUNT>;
PlaybackManager(NonnullRefPtr<MutexedDemuxer> const&, NonnullRefPtr<WeakPlaybackManager> const&, VideoTracks&&, VideoTrackDatas&&);
struct AudioTrackData {
Track track;
NonnullRefPtr<AudioDataProvider> provider;
};
using AudioTrackDatas = Vector<AudioTrackData, EXPECTED_AUDIO_TRACK_COUNT>;
PlaybackManager(NonnullRefPtr<MutexedDemuxer> const&, NonnullRefPtr<WeakPlaybackManager> const&, VideoTracks&&, VideoTrackDatas&&, RefPtr<AudioMixingSink> const&, AudioTracks&&, AudioTrackDatas&&);
void set_up_error_handlers();
void dispatch_error(DecoderError&&);
VideoTrackData& get_video_data_for_track(Track const& track);
AudioTrackData& get_audio_data_for_track(Track const& track);
NonnullRefPtr<MutexedDemuxer> m_demuxer;
@@ -105,6 +122,10 @@ private:
VideoTracks m_video_tracks;
VideoTrackDatas m_video_track_datas;
RefPtr<AudioMixingSink> m_audio_sink;
AudioTracks m_audio_tracks;
AudioTrackDatas m_audio_track_datas;
MonotonicTime m_real_time_base;
bool m_is_in_error_state { false };

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Debug.h>
#include <LibCore/EventLoop.h>
#include <LibMedia/FFmpeg/FFmpegAudioDecoder.h>
#include <LibMedia/MutexedDemuxer.h>
#include <LibMedia/Sinks/AudioSink.h>
#include <LibThreading/Mutex.h>
#include <LibThreading/Thread.h>
#include "AudioDataProvider.h"
namespace Media {
DecoderErrorOr<NonnullRefPtr<AudioDataProvider>> AudioDataProvider::try_create(NonnullRefPtr<MutexedDemuxer> const& demuxer, Track const& track)
{
auto codec_id = TRY(demuxer->get_codec_id_for_track(track));
auto codec_initialization_data = TRY(demuxer->get_codec_initialization_data_for_track(track));
auto decoder = DECODER_TRY_ALLOC(FFmpeg::FFmpegAudioDecoder::try_create(codec_id, codec_initialization_data));
auto thread_data = DECODER_TRY_ALLOC(try_make_ref_counted<AudioDataProvider::ThreadData>(demuxer, track, move(decoder)));
auto provider = DECODER_TRY_ALLOC(try_make_ref_counted<AudioDataProvider>(thread_data));
auto thread = DECODER_TRY_ALLOC(Threading::Thread::try_create([thread_data]() -> int {
while (!thread_data->should_thread_exit())
thread_data->push_data_and_decode_a_block();
return 0;
}));
thread->start();
thread->detach();
return provider;
}
AudioDataProvider::AudioDataProvider(NonnullRefPtr<ThreadData> const& thread_data)
: m_thread_data(thread_data)
{
}
AudioDataProvider::~AudioDataProvider()
{
m_thread_data->exit();
}
void AudioDataProvider::set_error_handler(ErrorHandler&& handler)
{
m_thread_data->set_error_handler(move(handler));
}
void AudioDataProvider::seek(AK::Duration timestamp)
{
m_thread_data->seek(timestamp);
}
AudioDataProvider::ThreadData::ThreadData(NonnullRefPtr<MutexedDemuxer> const& demuxer, Track const& track, NonnullOwnPtr<AudioDecoder>&& decoder)
: m_main_thread_event_loop(Core::EventLoop::current())
, m_demuxer(demuxer)
, m_track(track)
, m_decoder(move(decoder))
{
}
AudioDataProvider::ThreadData::~ThreadData() = default;
void AudioDataProvider::ThreadData::set_error_handler(ErrorHandler&& handler)
{
auto locker = take_lock();
m_error_handler = move(handler);
m_wait_condition.broadcast();
}
AudioBlock AudioDataProvider::retrieve_block()
{
auto locker = m_thread_data->take_lock();
if (m_thread_data->queue().is_empty())
return AudioBlock();
auto result = m_thread_data->queue().dequeue();
m_thread_data->wake();
return result;
}
void AudioDataProvider::ThreadData::exit()
{
m_exit = true;
m_wait_condition.broadcast();
}
void AudioDataProvider::ThreadData::seek(AK::Duration timestamp)
{
auto demuxer_result = m_demuxer->seek_to_most_recent_keyframe(m_track, timestamp);
if (demuxer_result.is_error()) {
m_error_handler(demuxer_result.release_error());
} else {
auto locker = take_lock();
m_is_in_error_state = false;
m_wait_condition.broadcast();
}
}
bool AudioDataProvider::ThreadData::should_thread_exit() const
{
return m_exit;
}
void AudioDataProvider::ThreadData::push_data_and_decode_a_block()
{
#if PLAYBACK_MANAGER_DEBUG
auto start_time = MonotonicTime::now();
#endif
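// Reports the error to the main thread, then blocks this decoding thread until a seek clears the error state.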
auto set_error_and_wait_for_seek = [this](DecoderError&& error) {
auto locker = take_lock();
m_is_in_error_state = true;
while (!m_error_handler)
m_wait_condition.wait();
m_main_thread_event_loop.deferred_invoke([this, error = move(error)]() mutable {
m_error_handler(move(error));
});
dbgln_if(PLAYBACK_MANAGER_DEBUG, "Audio Data Provider: Encountered an error, waiting for a seek to start decoding again...");
while (m_is_in_error_state)
m_wait_condition.wait();
};
auto sample_result = m_demuxer->get_next_sample_for_track(m_track);
if (sample_result.is_error()) {
if (sample_result.error().category() == DecoderErrorCategory::NeedsMoreInput) {
return;
}
// FIXME: Handle the end of the stream.
set_error_and_wait_for_seek(sample_result.release_error());
return;
}
auto sample = sample_result.release_value();
auto decode_result = m_decoder->receive_coded_data(sample.timestamp(), sample.data());
if (decode_result.is_error()) {
set_error_and_wait_for_seek(decode_result.release_error());
return;
}
while (true) {
auto locker = take_lock();
while (m_queue.size() >= m_queue_max_size) {
m_wait_condition.wait();
if (should_thread_exit())
return;
}
auto block = AudioBlock();
auto timestamp_result = m_decoder->write_next_block(block);
if (timestamp_result.is_error()) {
if (timestamp_result.error().category() == DecoderErrorCategory::NeedsMoreInput)
break;
set_error_and_wait_for_seek(timestamp_result.release_error());
break;
}
// FIXME: Specify trailing samples in the demuxer, and drop them here or in the audio decoder implementation.
VERIFY(!block.is_empty());
m_queue.enqueue(move(block));
VERIFY(!m_queue.tail().is_empty());
}
}
}

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/AtomicRefCounted.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/NonnullRefPtr.h>
#include <AK/Queue.h>
#include <AK/Time.h>
#include <LibCore/Forward.h>
#include <LibMedia/AudioBlock.h>
#include <LibMedia/DecoderError.h>
#include <LibMedia/Export.h>
#include <LibMedia/Forward.h>
#include <LibMedia/Track.h>
#include <LibThreading/ConditionVariable.h>
#include <LibThreading/Forward.h>
#include <LibThreading/Mutex.h>
namespace Media {
// Retrieves coded data from a demuxer and decodes it asynchronously into audio blocks for an AudioSink to consume.
class MEDIA_API AudioDataProvider : public AtomicRefCounted<AudioDataProvider> {
class ThreadData;
public:
static constexpr size_t QUEUE_CAPACITY = 16;
using AudioQueue = Queue<AudioBlock, QUEUE_CAPACITY>;
using ErrorHandler = Function<void(DecoderError&&)>;
static DecoderErrorOr<NonnullRefPtr<AudioDataProvider>> try_create(NonnullRefPtr<MutexedDemuxer> const& demuxer, Track const& track);
AudioDataProvider(NonnullRefPtr<ThreadData> const&);
~AudioDataProvider();
void set_error_handler(ErrorHandler&&);
AudioBlock retrieve_block();
void seek(AK::Duration timestamp);
private:
class ThreadData final : public AtomicRefCounted<ThreadData> {
public:
ThreadData(NonnullRefPtr<MutexedDemuxer> const&, Track const&, NonnullOwnPtr<AudioDecoder>&&);
~ThreadData();
void set_error_handler(ErrorHandler&&);
bool should_thread_exit() const;
void push_data_and_decode_a_block();
void exit();
void set_stopped(bool);
bool is_stopped() const;
void seek(AK::Duration timestamp);
[[nodiscard]] Threading::MutexLocker take_lock() { return Threading::MutexLocker(m_mutex); }
void wake() { m_wait_condition.broadcast(); }
AudioDecoder const& decoder() const { return *m_decoder; }
AudioQueue& queue() { return m_queue; }
private:
Core::EventLoop& m_main_thread_event_loop;
Threading::Mutex m_mutex;
Threading::ConditionVariable m_wait_condition { m_mutex };
Atomic<bool> m_exit { false };
NonnullRefPtr<MutexedDemuxer> m_demuxer;
Track m_track;
NonnullOwnPtr<AudioDecoder> m_decoder;
size_t m_queue_max_size { 8 };
AudioQueue m_queue;
ErrorHandler m_error_handler;
bool m_is_in_error_state { false };
};
NonnullRefPtr<ThreadData> m_thread_data;
};
}

@@ -0,0 +1,202 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Time.h>
#include <LibMedia/Audio/PlaybackStream.h>
#include <LibMedia/Providers/AudioDataProvider.h>
#include "AudioMixingSink.h"
namespace Media {
ErrorOr<NonnullRefPtr<AudioMixingSink>> AudioMixingSink::try_create()
{
auto weak_ref = TRY(try_make_ref_counted<AudioMixingSinkWeakReference>());
auto sink = TRY(try_make_ref_counted<AudioMixingSink>(weak_ref));
weak_ref->emplace(sink);
return sink;
}
AudioMixingSink::AudioMixingSink(AudioMixingSinkWeakReference& weak_ref)
: m_main_thread_event_loop(Core::EventLoop::current())
, m_weak_self(weak_ref)
{
}
AudioMixingSink::~AudioMixingSink()
{
m_weak_self->revoke();
}
void AudioMixingSink::deferred_create_playback_stream(Track const& track)
{
m_main_thread_event_loop.deferred_invoke([weak_self = m_weak_self, track = track] {
auto self = weak_self->take_strong();
if (self == nullptr)
return;
auto optional_track_mixing_data = self->m_track_mixing_datas.get(track);
if (!optional_track_mixing_data.has_value())
return;
Threading::MutexLocker locker { self->m_mutex };
auto& track_mixing_data = optional_track_mixing_data.release_value();
if (track_mixing_data.current_block.is_empty())
track_mixing_data.current_block = track_mixing_data.provider->retrieve_block();
if (!track_mixing_data.current_block.is_empty()) {
self->create_playback_stream(track_mixing_data.current_block.sample_rate(), track_mixing_data.current_block.channel_count());
return;
}
self->deferred_create_playback_stream(track);
});
}
void AudioMixingSink::set_provider(Track const& track, RefPtr<AudioDataProvider> const& provider)
{
Threading::MutexLocker locker { m_mutex };
m_track_mixing_datas.remove(track);
if (provider == nullptr)
return;
m_track_mixing_datas.set(track, TrackMixingData(*provider));
deferred_create_playback_stream(track);
}
RefPtr<AudioDataProvider> AudioMixingSink::provider(Track const& track) const
{
auto mixing_data = m_track_mixing_datas.get(track);
if (!mixing_data.has_value())
return nullptr;
return mixing_data->provider;
}
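// Converts a timestamp into an absolute sample index at the given sample rate, splitting out whole seconds so the nanosecond term cannot overflow for long timestamps.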
static inline i64 duration_to_sample(AK::Duration duration, u32 sample_rate)
{
VERIFY(sample_rate != 0);
auto seconds = duration.to_truncated_seconds();
auto nanoseconds = (duration - AK::Duration::from_seconds(seconds)).to_nanoseconds();
auto sample = seconds * sample_rate;
sample += nanoseconds * sample_rate / 1'000'000'000;
return sample;
}
void AudioMixingSink::create_playback_stream(u32 sample_rate, u32 channel_count)
{
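// Reuse the existing playback stream if it already satisfies the requested sample rate and channel count.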
if (m_playback_stream_sample_rate >= sample_rate && m_playback_stream_channel_count >= channel_count) {
VERIFY(m_playback_stream);
return;
}
Threading::MutexLocker playback_stream_change_locker { m_mutex };
auto callback = [=, weak_self = m_weak_self](Bytes buffer, Audio::PcmSampleFormat format, size_t sample_count) -> ReadonlyBytes {
auto self = weak_self->take_strong();
if (!self)
return buffer.trim(0);
VERIFY(format == Audio::PcmSampleFormat::Float32);
VERIFY(!Checked<i64>::multiplication_would_overflow(sample_count, channel_count));
auto float_buffer_count = static_cast<i64>(sample_count) * channel_count;
auto float_buffer_size = float_buffer_count * sizeof(float);
VERIFY(buffer.size() >= float_buffer_size);
auto float_buffer = buffer.reinterpret<float>();
float_buffer.fill(0.0f);
Threading::MutexLocker mixing_data_locker { self->m_mutex };
if (sample_rate != self->m_playback_stream_sample_rate || channel_count != self->m_playback_stream_channel_count)
return buffer.trim(0);
auto buffer_start = self->m_next_sample_to_write;
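// Mix each track's decoded blocks into the zero-filled output buffer, aligning every block by its absolute sample offset from the start of playback.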
for (auto& [track, track_data] : self->m_track_mixing_datas) {
auto next_sample = buffer_start;
auto samples_end = next_sample + static_cast<i64>(sample_count);
auto go_to_next_block = [&] {
auto new_block = track_data.provider->retrieve_block();
if (new_block.is_empty())
return false;
auto new_block_first_sample_offset = duration_to_sample(new_block.start_timestamp(), sample_rate);
if (!track_data.current_block.is_empty() && track_data.current_block.sample_rate() == sample_rate && track_data.current_block.channel_count() == channel_count) {
auto current_block_end = track_data.current_block_first_sample_offset + static_cast<i64>(track_data.current_block.sample_count());
new_block_first_sample_offset = max(new_block_first_sample_offset, current_block_end);
}
track_data.current_block = move(new_block);
track_data.current_block_first_sample_offset = new_block_first_sample_offset;
return true;
};
if (track_data.current_block.is_empty()) {
if (!go_to_next_block())
continue;
}
while (!track_data.current_block.is_empty()) {
auto& current_block = track_data.current_block;
auto current_block_data_count = static_cast<i64>(current_block.data_count());
auto current_block_sample_count = static_cast<i64>(current_block.sample_count());
if (current_block.sample_rate() != sample_rate || current_block.channel_count() != channel_count) {
current_block.clear();
continue;
}
auto first_sample_offset = track_data.current_block_first_sample_offset;
if (first_sample_offset >= samples_end)
break;
auto block_end = first_sample_offset + current_block_sample_count;
if (block_end <= next_sample) {
if (!go_to_next_block())
break;
continue;
}
next_sample = max(next_sample, first_sample_offset);
auto index_in_block = (next_sample - first_sample_offset) * channel_count;
VERIFY(index_in_block < current_block_data_count);
auto index_in_buffer = (next_sample - buffer_start) * channel_count;
VERIFY(index_in_buffer < float_buffer_count);
auto write_count = current_block_data_count - index_in_block;
write_count = min(write_count, float_buffer_count - index_in_buffer);
VERIFY(write_count > 0);
VERIFY(index_in_buffer + write_count <= float_buffer_count);
VERIFY(write_count % channel_count == 0);
for (i64 i = 0; i < write_count; i++)
float_buffer[index_in_buffer + i] += current_block.data()[index_in_block + i];
auto write_end = index_in_block + write_count;
if (write_end == current_block_data_count) {
if (!go_to_next_block())
break;
continue;
}
VERIFY(write_end < current_block_data_count);
next_sample += write_count / channel_count;
if (next_sample == samples_end)
break;
VERIFY(next_sample < samples_end);
}
}
self->m_next_sample_to_write += static_cast<i64>(sample_count);
return buffer.slice(0, float_buffer_size);
};
constexpr u32 target_latency_ms = 100;
m_playback_stream = MUST(Audio::PlaybackStream::create(Audio::OutputState::Playing, sample_rate, channel_count, target_latency_ms, move(callback)));
m_playback_stream_sample_rate = sample_rate;
m_playback_stream_channel_count = channel_count;
}
}

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/HashMap.h>
#include <AK/NonnullRefPtr.h>
#include <AK/RefPtr.h>
#include <LibCore/EventLoop.h>
#include <LibMedia/Audio/Forward.h>
#include <LibMedia/Audio/SampleFormats.h>
#include <LibMedia/Export.h>
#include <LibMedia/Forward.h>
#include <LibMedia/Sinks/AudioSink.h>
#include <LibThreading/ConditionVariable.h>
#include <LibThreading/Mutex.h>
namespace Media {
class MEDIA_API AudioMixingSink final : public AudioSink {
class AudioMixingSinkWeakReference;
public:
static ErrorOr<NonnullRefPtr<AudioMixingSink>> try_create();
AudioMixingSink(AudioMixingSinkWeakReference&);
virtual ~AudioMixingSink() override;
virtual void set_provider(Track const&, RefPtr<AudioDataProvider> const&) override;
virtual RefPtr<AudioDataProvider> provider(Track const&) const override;
private:
static constexpr size_t MAX_BLOCK_COUNT = 16;
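// Allows the real-time playback stream callback to take a strong reference to the sink, or to observe that the sink has already been destroyed.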
class AudioMixingSinkWeakReference : public AtomicRefCounted<AudioMixingSinkWeakReference> {
public:
void emplace(AudioMixingSink& sink) { m_ptr = &sink; }
RefPtr<AudioMixingSink> take_strong() const
{
Threading::MutexLocker locker { m_mutex };
return m_ptr;
}
void revoke()
{
Threading::MutexLocker locker { m_mutex };
m_ptr = nullptr;
}
private:
mutable Threading::Mutex m_mutex;
AudioMixingSink* m_ptr { nullptr };
};
struct TrackMixingData {
TrackMixingData(NonnullRefPtr<AudioDataProvider> const& provider)
: provider(provider)
{
}
NonnullRefPtr<AudioDataProvider> provider;
AudioBlock current_block;
i64 current_block_first_sample_offset { NumericLimits<i64>::min() };
};
void deferred_create_playback_stream(Track const& track);
void create_playback_stream(u32 sample_rate, u32 channel_count);
ReadonlyBytes write_audio_data_to_playback_stream(Bytes buffer, Audio::PcmSampleFormat format, size_t sample_count);
Core::EventLoop& m_main_thread_event_loop;
NonnullRefPtr<AudioMixingSinkWeakReference> m_weak_self;
Threading::Mutex m_mutex;
Threading::ConditionVariable m_wait_condition { m_mutex };
RefPtr<Audio::PlaybackStream> m_playback_stream;
u32 m_playback_stream_sample_rate { 0 };
u32 m_playback_stream_channel_count { 0 };
HashMap<Track, TrackMixingData> m_track_mixing_datas;
i64 m_next_sample_to_write { 0 };
};
}

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/AtomicRefCounted.h>
#include <LibMedia/AudioBlock.h>
#include <LibMedia/Export.h>
#include <LibMedia/Forward.h>
#include <LibMedia/Track.h>
namespace Media {
class MEDIA_API AudioSink : public AtomicRefCounted<AudioSink> {
public:
virtual ~AudioSink() = default;
virtual void set_provider(Track const&, RefPtr<AudioDataProvider> const&) = 0;
virtual RefPtr<AudioDataProvider> provider(Track const&) const = 0;
};
}

@@ -13,7 +13,9 @@ shared_library("LibMedia") {
"Containers/Matroska/MatroskaDemuxer.cpp",
"Containers/Matroska/Reader.cpp",
"PlaybackManager.cpp",
"Providers/AudioDataProvider.cpp",
"Providers/VideoDataProvider.cpp",
"Sinks/AudioMixingSink.cpp",
"Sinks/DisplayingVideoSink.cpp",
"VideoFrame.cpp",
]
@@ -26,6 +28,7 @@ shared_library("LibMedia") {
if (enable_ffmpeg) {
sources += [
"Audio/FFmpegLoader.cpp",
"FFmpeg/FFmpegAudioDecoder.cpp",
"FFmpeg/FFmpegVideoDecoder.cpp",
]
} else {