LibMedia: Support coded audio frames in our demuxers

This adds a new variant, CodedAudioFrameData, to the auxiliary metadata
storage in CodedFrame for audio frames.
Authored by Zaggy1024 on 2025-09-23 18:30:41 -05:00, committed by Jelle Raaijmakers
parent 6caa2f99aa
commit 6b34003c2c
4 changed files with 52 additions and 15 deletions
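
For context, a minimal sketch (not part of this commit) of how a consumer of CodedFrame might branch on the two AuxiliaryData alternatives; the auxiliary_data() accessor and the dispatch helper are assumptions for illustration, not CodedFrame's confirmed interface:

#include <LibMedia/CodedAudioFrameData.h>
#include <LibMedia/CodedFrame.h>
#include <LibMedia/CodedVideoFrameData.h>

// Assumed accessor: CodedFrame::auxiliary_data() returning the AuxiliaryData variant.
// AK::Variant::visit() invokes whichever lambda matches the stored alternative.
static void dispatch_coded_frame(Media::CodedFrame const& frame)
{
    frame.auxiliary_data().visit(
        [](Media::CodedVideoFrameData const& video_data) {
            // Video frames carry CodingIndependentCodePoints describing their color space;
            // a video decode path would forward these to the color converter.
            (void)video_data;
        },
        [](Media::CodedAudioFrameData const&) {
            // Audio frames carry no extra metadata yet; the variant only tags the frame as audio.
        });
}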

LibMedia/CodedAudioFrameData.h (new file)

@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2025, Gregory Bertilson <gregory@ladybird.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+namespace Media {
+
+class CodedAudioFrameData {
+public:
+    CodedAudioFrameData()
+    {
+    }
+};
+
+}

LibMedia/CodedFrame.h

@@ -8,13 +8,14 @@
 
 #include <AK/ByteBuffer.h>
 #include <AK/Time.h>
+#include <LibMedia/CodedAudioFrameData.h>
 #include <LibMedia/CodedVideoFrameData.h>
 
 namespace Media {
 
 class CodedFrame final {
 public:
-    using AuxiliaryData = Variant<CodedVideoFrameData>;
+    using AuxiliaryData = Variant<CodedVideoFrameData, CodedAudioFrameData>;
 
     CodedFrame(AK::Duration timestamp, ByteBuffer&& data, AuxiliaryData auxiliary_data)
         : m_timestamp(timestamp)

MatroskaDemuxer.cpp

@@ -190,9 +190,18 @@ DecoderErrorOr<CodedFrame> MatroskaDemuxer::get_next_sample_for_track(Track track)
         status.block = TRY(status.iterator.next_block());
         status.frame_index = 0;
     }
-    auto cicp = TRY(m_reader.track_for_track_number(track.identifier()))->video_track()->color_format.to_cicp();
+    auto aux_data = [&] -> CodedFrame::AuxiliaryData {
+        if (track.type() == TrackType::Video) {
+            auto cicp = MUST(m_reader.track_for_track_number(track.identifier()))->video_track()->color_format.to_cicp();
+            return CodedVideoFrameData(cicp);
+        }
+        if (track.type() == TrackType::Audio) {
+            return CodedAudioFrameData();
+        }
+        VERIFY_NOT_REACHED();
+    }();
     auto sample_data = DECODER_TRY_ALLOC(ByteBuffer::copy(status.block->frame(status.frame_index++)));
-    return CodedFrame(status.block->timestamp(), move(sample_data), CodedVideoFrameData(cicp));
+    return CodedFrame(status.block->timestamp(), move(sample_data), aux_data);
 }
 
 DecoderErrorOr<AK::Duration> MatroskaDemuxer::total_duration()

FFmpegDemuxer.cpp

@@ -181,18 +181,27 @@ DecoderErrorOr<CodedFrame> FFmpegDemuxer::get_next_sample_for_track(Track track)
             continue;
         }
 
-        auto color_primaries = static_cast<ColorPrimaries>(stream->codecpar->color_primaries);
-        auto transfer_characteristics = static_cast<TransferCharacteristics>(stream->codecpar->color_trc);
-        auto matrix_coefficients = static_cast<MatrixCoefficients>(stream->codecpar->color_space);
-        auto color_range = [stream] {
-            switch (stream->codecpar->color_range) {
-            case AVColorRange::AVCOL_RANGE_MPEG:
-                return VideoFullRangeFlag::Studio;
-            case AVColorRange::AVCOL_RANGE_JPEG:
-                return VideoFullRangeFlag::Full;
-            default:
-                return VideoFullRangeFlag::Unspecified;
+        auto auxiliary_data = [&]() -> CodedFrame::AuxiliaryData {
+            if (track.type() == TrackType::Video) {
+                auto color_primaries = static_cast<ColorPrimaries>(stream->codecpar->color_primaries);
+                auto transfer_characteristics = static_cast<TransferCharacteristics>(stream->codecpar->color_trc);
+                auto matrix_coefficients = static_cast<MatrixCoefficients>(stream->codecpar->color_space);
+                auto color_range = [stream] {
+                    switch (stream->codecpar->color_range) {
+                    case AVColorRange::AVCOL_RANGE_MPEG:
+                        return VideoFullRangeFlag::Studio;
+                    case AVColorRange::AVCOL_RANGE_JPEG:
+                        return VideoFullRangeFlag::Full;
+                    default:
+                        return VideoFullRangeFlag::Unspecified;
+                    }
+                }();
+                return CodedVideoFrameData(CodingIndependentCodePoints(color_primaries, transfer_characteristics, matrix_coefficients, color_range));
+            }
+            if (track.type() == TrackType::Audio) {
+                return CodedAudioFrameData();
             }
+            VERIFY_NOT_REACHED();
         }();
 
         // Copy the packet data so that we have a permanent reference to it whilst the Sample is alive, which allows us
@@ -202,7 +211,7 @@ DecoderErrorOr<CodedFrame> FFmpegDemuxer::get_next_sample_for_track(Track track)
         auto sample = CodedFrame(
             time_units_to_duration(m_packet->pts, stream->time_base),
             move(packet_data),
-            CodedVideoFrameData(CodingIndependentCodePoints(color_primaries, transfer_characteristics, matrix_coefficients, color_range)));
+            auxiliary_data);
 
         // Wipe the packet now that the data is safe.
         av_packet_unref(m_packet);