From: Steinar H. Gunderson <steinar+nageru@gunderson.no> Date: Sun, 1 Oct 2023 09:11:52 +0000 (+0200) Subject: Stop using av_init_packet(). X-Git-Tag: 2.3.0~8 X-Git-Url: https://git.sesse.net/?p=nageru;a=commitdiff_plain;h=2f92c975a3cf9f4803a58267fd2a12765e34a69e Stop using av_init_packet(). It's a shame that we now need to call malloc for each and every allocation of a common 104-byte struct, but evidently, that's the way FFmpeg wants it. So move to heap allocation everywhere, silencing a barrage of deprecation warnings during build. --- diff --git a/futatabi/export.cpp b/futatabi/export.cpp index a16b41d..b02361e 100644 --- a/futatabi/export.cpp +++ b/futatabi/export.cpp @@ -34,16 +34,15 @@ struct BufferedFrame { bool write_buffered_frames(AVFormatContext *avctx, const vector<BufferedFrame> &buffered_frames) { for (const BufferedFrame &frame : buffered_frames) { - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = frame.video_stream_idx; - pkt.data = (uint8_t *)frame.data.data(); - pkt.size = frame.data.size(); - pkt.pts = frame.pts; - pkt.dts = frame.pts; - pkt.flags = AV_PKT_FLAG_KEY; + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = frame.video_stream_idx; + pkt->data = (uint8_t *)frame.data.data(); + pkt->size = frame.data.size(); + pkt->pts = frame.pts; + pkt->dts = frame.pts; + pkt->flags = AV_PKT_FLAG_KEY; - if (av_write_frame(avctx, &pkt) < 0) { + if (av_write_frame(avctx, pkt.get()) < 0) { return false; } } diff --git a/futatabi/main.cpp b/futatabi/main.cpp index bd18780..00de13f 100644 --- a/futatabi/main.cpp +++ b/futatabi/main.cpp @@ -526,44 +526,41 @@ void record_thread_func() vector<uint32_t> pending_audio[MAX_STREAMS]; int64_t last_pts = -1; while (!should_quit.load()) { - AVPacket pkt; - unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup( - &pkt, av_packet_unref); - av_init_packet(&pkt); - pkt.data = nullptr; - pkt.size = 0; + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->data = nullptr; + pkt->size = 0; // TODO: Make it possible to abort av_read_frame() (use an interrupt callback); 
// right now, should_quit will be ignored if it's hung on I/O. - if (av_read_frame(format_ctx.get(), &pkt) != 0) { + if (av_read_frame(format_ctx.get(), pkt.get()) != 0) { break; } - AVStream *stream = format_ctx->streams[pkt.stream_index]; + AVStream *stream = format_ctx->streams[pkt->stream_index]; if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && - audio_stream_to_video_stream_idx.count(pkt.stream_index)) { - if ((pkt.size % (sizeof(uint32_t) * 2)) != 0) { + audio_stream_to_video_stream_idx.count(pkt->stream_index)) { + if ((pkt->size % (sizeof(uint32_t) * 2)) != 0) { fprintf(stderr, "Audio stream %u had a packet of strange length %d, ignoring.\n", - pkt.stream_index, pkt.size); + pkt->stream_index, pkt->size); } else { // TODO: Endianness? - const uint32_t *begin = (const uint32_t *)pkt.data; - const uint32_t *end = (const uint32_t *)(pkt.data + pkt.size); - pending_audio[audio_stream_to_video_stream_idx[pkt.stream_index]].assign(begin, end); + const uint32_t *begin = (const uint32_t *)pkt->data; + const uint32_t *end = (const uint32_t *)(pkt->data + pkt->size); + pending_audio[audio_stream_to_video_stream_idx[pkt->stream_index]].assign(begin, end); } } - if (pkt.stream_index >= MAX_STREAMS || + if (pkt->stream_index >= MAX_STREAMS || stream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) { continue; } - ++metric_received_frames[pkt.stream_index]; - metric_received_frame_size_bytes.count_event(pkt.size); + ++metric_received_frames[pkt->stream_index]; + metric_received_frame_size_bytes.count_event(pkt->size); // Convert pts to our own timebase. AVRational stream_timebase = stream->time_base; - int64_t pts = av_rescale_q(pkt.pts, stream_timebase, AVRational{ 1, TIMEBASE }); + int64_t pts = av_rescale_q(pkt->pts, stream_timebase, AVRational{ 1, TIMEBASE }); // Translate offset into our stream. 
if (last_pts == -1) { @@ -572,11 +569,11 @@ void record_thread_func() pts = std::max(pts + pts_offset, start_pts); //fprintf(stderr, "Got a frame from camera %d, pts = %ld, size = %d\n", - // pkt.stream_index, pts, pkt.size); - FrameOnDisk frame = write_frame(pkt.stream_index, pts, pkt.data, pkt.size, move(pending_audio[pkt.stream_index]), &db); + // pkt->stream_index, pts, pkt->size); + FrameOnDisk frame = write_frame(pkt->stream_index, pts, pkt->data, pkt->size, move(pending_audio[pkt->stream_index]), &db); - post_to_main_thread([pkt, frame] { - global_mainwindow->display_frame(pkt.stream_index, frame); + post_to_main_thread([stream_index{pkt->stream_index}, frame] { + global_mainwindow->display_frame(stream_index, frame); }); if (last_pts != -1 && global_flags.slow_down_input) { diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp index 2a0c7c0..c12acdf 100644 --- a/futatabi/video_stream.cpp +++ b/futatabi/video_stream.cpp @@ -15,6 +15,7 @@ extern "C" { #include "pbo_pool.h" #include "player.h" #include "shared/context.h" +#include "shared/ffmpeg_raii.h" #include "shared/httpd.h" #include "shared/metrics.h" #include "shared/shared_defs.h" @@ -743,27 +744,25 @@ void VideoStream::encode_thread_func() // Hack: We mux the subtitle packet one time unit before the actual frame, // so that Nageru is sure to get it first. if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) { - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = mux->get_subtitle_stream_idx(); - assert(pkt.stream_index != -1); - pkt.data = (uint8_t *)qf.subtitle.data(); - pkt.size = qf.subtitle.size(); - pkt.flags = 0; - pkt.duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru. 
- mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = mux->get_subtitle_stream_idx(); + assert(pkt->stream_index != -1); + pkt->data = (uint8_t *)qf.subtitle.data(); + pkt->size = qf.subtitle.size(); + pkt->flags = 0; + pkt->duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru. + mux->add_packet(*pkt, qf.output_pts - 1, qf.output_pts - 1); } if (qf.type == QueuedFrame::ORIGINAL) { // Send the JPEG frame on, unchanged. string jpeg = move(*qf.encoded_jpeg); - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 0; - pkt.data = (uint8_t *)jpeg.data(); - pkt.size = jpeg.size(); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, qf.output_pts, qf.output_pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 0; + pkt->data = (uint8_t *)jpeg.data(); + pkt->size = jpeg.size(); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, qf.output_pts, qf.output_pts); last_frame = move(jpeg); add_audio_or_silence(qf); @@ -777,13 +776,12 @@ void VideoStream::encode_thread_func() // Now JPEG encode it, and send it on to the stream. 
string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, /*exif_data=*/""); - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 0; - pkt.data = (uint8_t *)jpeg.data(); - pkt.size = jpeg.size(); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, qf.output_pts, qf.output_pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 0; + pkt->data = (uint8_t *)jpeg.data(); + pkt->size = jpeg.size(); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, qf.output_pts, qf.output_pts); last_frame = move(jpeg); add_audio_or_silence(qf); @@ -822,24 +820,22 @@ void VideoStream::encode_thread_func() interpolate->release_texture(qf.cbcr_tex); } - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 0; - pkt.data = (uint8_t *)jpeg.data(); - pkt.size = jpeg.size(); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, qf.output_pts, qf.output_pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 0; + pkt->data = (uint8_t *)jpeg.data(); + pkt->size = jpeg.size(); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, qf.output_pts, qf.output_pts); last_frame = move(jpeg); add_audio_or_silence(qf); } else if (qf.type == QueuedFrame::REFRESH) { - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 0; - pkt.data = (uint8_t *)last_frame.data(); - pkt.size = last_frame.size(); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, qf.output_pts, qf.output_pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 0; + pkt->data = (uint8_t *)last_frame.data(); + pkt->size = last_frame.size(); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, qf.output_pts, qf.output_pts); add_audio_or_silence(qf); // Definitely silence. 
} else if (qf.type == QueuedFrame::SILENCE) { @@ -887,13 +883,12 @@ void VideoStream::add_silence(int64_t pts, int64_t length_pts) long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2; uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t)); - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 1; - pkt.data = zero; - pkt.size = num_samples * sizeof(int32_t); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, pts, pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 1; + pkt->data = zero; + pkt->size = num_samples * sizeof(int32_t); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, pts, pts); free(zero); } @@ -904,12 +899,11 @@ void VideoStream::add_audio_or_silence(const QueuedFrame &qf) int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate); add_silence(qf.output_pts, frame_length); } else { - AVPacket pkt; - av_init_packet(&pkt); - pkt.stream_index = 1; - pkt.data = (uint8_t *)qf.audio.data(); - pkt.size = qf.audio.size(); - pkt.flags = AV_PKT_FLAG_KEY; - mux->add_packet(pkt, qf.output_pts, qf.output_pts); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->stream_index = 1; + pkt->data = (uint8_t *)qf.audio.data(); + pkt->size = qf.audio.size(); + pkt->flags = AV_PKT_FLAG_KEY; + mux->add_packet(*pkt, qf.output_pts, qf.output_pts); } } diff --git a/nageru/audio_encoder.cpp b/nageru/audio_encoder.cpp index 126e0e2..e683265 100644 --- a/nageru/audio_encoder.cpp +++ b/nageru/audio_encoder.cpp @@ -135,18 +135,16 @@ void AudioEncoder::encode_audio_one_frame(const float *audio, size_t num_samples } for ( ;; ) { // Termination condition within loop. 
- AVPacket pkt; - av_init_packet(&pkt); - pkt.data = nullptr; - pkt.size = 0; - int err = avcodec_receive_packet(ctx, &pkt); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->data = nullptr; + pkt->size = 0; + int err = avcodec_receive_packet(ctx, pkt.get()); if (err == 0) { - pkt.stream_index = 1; - pkt.flags = 0; + pkt->stream_index = 1; + pkt->flags = 0; for (Mux *mux : muxes) { - mux->add_packet(pkt, pkt.pts, pkt.dts); + mux->add_packet(*pkt, pkt->pts, pkt->dts); } - av_packet_unref(&pkt); } else if (err == AVERROR(EAGAIN)) { break; } else { @@ -171,18 +169,16 @@ void AudioEncoder::encode_last_audio() if (ctx->codec->capabilities & AV_CODEC_CAP_DELAY) { // Collect any delayed frames. for ( ;; ) { - AVPacket pkt; - av_init_packet(&pkt); - pkt.data = nullptr; - pkt.size = 0; - int err = avcodec_receive_packet(ctx, &pkt); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->data = nullptr; + pkt->size = 0; + int err = avcodec_receive_packet(ctx, pkt.get()); if (err == 0) { - pkt.stream_index = 1; - pkt.flags = 0; + pkt->stream_index = 1; + pkt->flags = 0; for (Mux *mux : muxes) { - mux->add_packet(pkt, pkt.pts, pkt.dts); + mux->add_packet(*pkt, pkt->pts, pkt->dts); } - av_packet_unref(&pkt); } else if (err == AVERROR_EOF) { break; } else { diff --git a/nageru/ffmpeg_capture.cpp b/nageru/ffmpeg_capture.cpp index 9af4e83..09c1d26 100644 --- a/nageru/ffmpeg_capture.cpp +++ b/nageru/ffmpeg_capture.cpp @@ -901,34 +901,31 @@ AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCo *audio_pts = -1; bool has_audio = false; do { - AVPacket pkt; - unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup( - &pkt, av_packet_unref); - av_init_packet(&pkt); - pkt.data = nullptr; - pkt.size = 0; - if (av_read_frame(format_ctx, &pkt) == 0) { - if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) { - audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base); + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->data = nullptr; + 
pkt->size = 0; + if (av_read_frame(format_ctx, pkt.get()) == 0) { + if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) { + audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base); } - if (pkt.stream_index == video_stream_index && video_callback != nullptr) { - video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base); + if (pkt->stream_index == video_stream_index && video_callback != nullptr) { + video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base); } - if (pkt.stream_index == video_stream_index && global_flags.transcode_video) { - if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) { + if (pkt->stream_index == video_stream_index && global_flags.transcode_video) { + if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) { fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str()); *error = true; return AVFrameWithDeleter(nullptr); } - } else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) { + } else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) { has_audio = true; - if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) { + if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) { fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str()); *error = true; return AVFrameWithDeleter(nullptr); } - } else if (pkt.stream_index == subtitle_stream_index) { - last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size); + } else if (pkt->stream_index == subtitle_stream_index) { + last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size); has_last_subtitle = true; } } else { diff --git a/nageru/image_input.cpp b/nageru/image_input.cpp index 2e06271..7b11679 100644 --- a/nageru/image_input.cpp +++ b/nageru/image_input.cpp @@ -143,17 +143,14 @@ shared_ptr ImageInput::load_image_raw(const string &pat AVFrameWithDeleter frame = av_frame_alloc_unique(); bool eof = false; do { - AVPacket pkt; - 
unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup( - &pkt, av_packet_unref); - av_init_packet(&pkt); - pkt.data = nullptr; - pkt.size = 0; - if (av_read_frame(format_ctx.get(), &pkt) == 0) { - if (pkt.stream_index != stream_index) { + AVPacketWithDeleter pkt = av_packet_alloc_unique(); + pkt->data = nullptr; + pkt->size = 0; + if (av_read_frame(format_ctx.get(), pkt.get()) == 0) { + if (pkt->stream_index != stream_index) { continue; } - if (avcodec_send_packet(codec_ctx.get(), &pkt) < 0) { + if (avcodec_send_packet(codec_ctx.get(), pkt.get()) < 0) { fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str()); return nullptr; } diff --git a/nageru/kaeru.cpp b/nageru/kaeru.cpp index 9ff672d..1699d24 100644 --- a/nageru/kaeru.cpp +++ b/nageru/kaeru.cpp @@ -162,10 +162,8 @@ void filter_packet_callback(Mux *mux, int stream_index, AVBSFContext *bsfctx, co fprintf(stderr, "av_bsf_send_packet() failed with %d, ignoring\n", err); } for ( ;; ) { - AVPacket out_pkt; - unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(&out_pkt, av_packet_unref); - av_init_packet(&out_pkt); - err = av_bsf_receive_packet(bsfctx, &out_pkt); + AVPacketWithDeleter out_pkt = av_packet_alloc_unique(); + err = av_bsf_receive_packet(bsfctx, out_pkt.get()); if (err == AVERROR(EAGAIN)) { break; } @@ -173,7 +171,7 @@ void filter_packet_callback(Mux *mux, int stream_index, AVBSFContext *bsfctx, co fprintf(stderr, "av_bsf_receive_packet() failed with %d, ignoring\n", err); return; } - mux->add_packet(out_pkt, out_pkt.pts, out_pkt.dts == AV_NOPTS_VALUE ? out_pkt.pts : out_pkt.dts, timebase, stream_index); + mux->add_packet(*out_pkt, out_pkt->pts, out_pkt->dts == AV_NOPTS_VALUE ? 
out_pkt->pts : out_pkt->dts, timebase, stream_index); } } diff --git a/shared/ffmpeg_raii.cpp b/shared/ffmpeg_raii.cpp index 0e087c2..f77dfab 100644 --- a/shared/ffmpeg_raii.cpp +++ b/shared/ffmpeg_raii.cpp @@ -94,6 +94,17 @@ AVFrameWithDeleter av_frame_alloc_unique() return AVFrameWithDeleter(av_frame_alloc()); } +// AVPacket +void av_packet_free_unique::operator() (AVPacket *packet) const +{ + av_packet_free(&packet); +} + +AVPacketWithDeleter av_packet_alloc_unique() +{ + return AVPacketWithDeleter(av_packet_alloc()); +} + // SwsContext void sws_free_context_unique::operator() (SwsContext *context) const diff --git a/shared/ffmpeg_raii.h b/shared/ffmpeg_raii.h index 00f7fc1..ad4edea 100644 --- a/shared/ffmpeg_raii.h +++ b/shared/ffmpeg_raii.h @@ -17,6 +17,7 @@ struct AVCodecParameters; struct AVDictionary; struct AVFormatContext; struct AVFrame; +struct AVPacket; struct AVInputFormat; struct SwsContext; typedef struct AVIOInterruptCB AVIOInterruptCB; @@ -74,6 +75,17 @@ typedef std::unique_ptr AVFrameWithDeleter av_frame_alloc_unique(); +// AVPacket (ick!) +// Not really unique from FFmpeg's point of view, but it is from ours +struct av_packet_free_unique { + void operator() (AVPacket *packet) const; +}; + +typedef std::unique_ptr<AVPacket, av_packet_free_unique> + AVPacketWithDeleter; + +AVPacketWithDeleter av_packet_alloc_unique(); + // SwsContext struct sws_free_context_unique { void operator() (SwsContext *context) const; diff --git a/shared/mux.cpp b/shared/mux.cpp index 46f727c..b8feacd 100644 --- a/shared/mux.cpp +++ b/shared/mux.cpp @@ -157,7 +157,6 @@ void Mux::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts, AVRational t assert(pts >= dts); AVPacket pkt_copy; - av_init_packet(&pkt_copy); if (av_packet_ref(&pkt_copy, &pkt) < 0) { fprintf(stderr, "av_copy_packet() failed\n"); abort(); @@ -294,6 +293,7 @@ void Mux::write_header() // Make sure the header is written before the constructor exits // (assuming we are in WRITE_FOREGROUND mode). 
avio_flush(avctx->pb); + } void MuxMetrics::init(const vector<pair<string, string>> &labels)