bool write_buffered_frames(AVFormatContext *avctx, const vector<BufferedFrame> &buffered_frames)
{
for (const BufferedFrame &frame : buffered_frames) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = frame.video_stream_idx;
- pkt.data = (uint8_t *)frame.data.data();
- pkt.size = frame.data.size();
- pkt.pts = frame.pts;
- pkt.dts = frame.pts;
- pkt.flags = AV_PKT_FLAG_KEY;
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = frame.video_stream_idx;
+ pkt->data = (uint8_t *)frame.data.data();
+ pkt->size = frame.data.size();
+ pkt->pts = frame.pts;
+ pkt->dts = frame.pts;
+ pkt->flags = AV_PKT_FLAG_KEY;
- if (av_write_frame(avctx, &pkt) < 0) {
+ if (av_write_frame(avctx, pkt.get()) < 0) {
return false;
}
}
vector<uint32_t> pending_audio[MAX_STREAMS];
int64_t last_pts = -1;
while (!should_quit.load()) {
- AVPacket pkt;
- unique_ptr<AVPacket, decltype(av_packet_unref) *> pkt_cleanup(
- &pkt, av_packet_unref);
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->data = nullptr;
+ pkt->size = 0;
// TODO: Make it possible to abort av_read_frame() (use an interrupt callback);
// right now, should_quit will be ignored if it's hung on I/O.
- if (av_read_frame(format_ctx.get(), &pkt) != 0) {
+ if (av_read_frame(format_ctx.get(), pkt.get()) != 0) {
break;
}
- AVStream *stream = format_ctx->streams[pkt.stream_index];
+ AVStream *stream = format_ctx->streams[pkt->stream_index];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
- audio_stream_to_video_stream_idx.count(pkt.stream_index)) {
- if ((pkt.size % (sizeof(uint32_t) * 2)) != 0) {
+ audio_stream_to_video_stream_idx.count(pkt->stream_index)) {
+ if ((pkt->size % (sizeof(uint32_t) * 2)) != 0) {
fprintf(stderr, "Audio stream %u had a packet of strange length %d, ignoring.\n",
- pkt.stream_index, pkt.size);
+ pkt->stream_index, pkt->size);
} else {
// TODO: Endianness?
- const uint32_t *begin = (const uint32_t *)pkt.data;
- const uint32_t *end = (const uint32_t *)(pkt.data + pkt.size);
- pending_audio[audio_stream_to_video_stream_idx[pkt.stream_index]].assign(begin, end);
+ const uint32_t *begin = (const uint32_t *)pkt->data;
+ const uint32_t *end = (const uint32_t *)(pkt->data + pkt->size);
+ pending_audio[audio_stream_to_video_stream_idx[pkt->stream_index]].assign(begin, end);
}
}
- if (pkt.stream_index >= MAX_STREAMS ||
+ if (pkt->stream_index >= MAX_STREAMS ||
stream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
continue;
}
- ++metric_received_frames[pkt.stream_index];
- metric_received_frame_size_bytes.count_event(pkt.size);
+ ++metric_received_frames[pkt->stream_index];
+ metric_received_frame_size_bytes.count_event(pkt->size);
// Convert pts to our own timebase.
AVRational stream_timebase = stream->time_base;
- int64_t pts = av_rescale_q(pkt.pts, stream_timebase, AVRational{ 1, TIMEBASE });
+ int64_t pts = av_rescale_q(pkt->pts, stream_timebase, AVRational{ 1, TIMEBASE });
// Translate offset into our stream.
if (last_pts == -1) {
pts = std::max(pts + pts_offset, start_pts);
//fprintf(stderr, "Got a frame from camera %d, pts = %ld, size = %d\n",
- // pkt.stream_index, pts, pkt.size);
- FrameOnDisk frame = write_frame(pkt.stream_index, pts, pkt.data, pkt.size, move(pending_audio[pkt.stream_index]), &db);
+ // pkt->stream_index, pts, pkt->size);
+ FrameOnDisk frame = write_frame(pkt->stream_index, pts, pkt->data, pkt->size, move(pending_audio[pkt->stream_index]), &db);
- post_to_main_thread([pkt, frame] {
- global_mainwindow->display_frame(pkt.stream_index, frame);
+ post_to_main_thread([stream_index{pkt->stream_index}, frame] {
+ global_mainwindow->display_frame(stream_index, frame);
});
if (last_pts != -1 && global_flags.slow_down_input) {
#include "pbo_pool.h"
#include "player.h"
#include "shared/context.h"
+#include "shared/ffmpeg_raii.h"
#include "shared/httpd.h"
#include "shared/metrics.h"
#include "shared/shared_defs.h"
// Hack: We mux the subtitle packet one time unit before the actual frame,
// so that Nageru is sure to get it first.
if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = mux->get_subtitle_stream_idx();
- assert(pkt.stream_index != -1);
- pkt.data = (uint8_t *)qf.subtitle.data();
- pkt.size = qf.subtitle.size();
- pkt.flags = 0;
- pkt.duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
- mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = mux->get_subtitle_stream_idx();
+ assert(pkt->stream_index != -1);
+ pkt->data = (uint8_t *)qf.subtitle.data();
+ pkt->size = qf.subtitle.size();
+ pkt->flags = 0;
+ pkt->duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
+ mux->add_packet(*pkt, qf.output_pts - 1, qf.output_pts - 1);
}
if (qf.type == QueuedFrame::ORIGINAL) {
// Send the JPEG frame on, unchanged.
string jpeg = move(*qf.encoded_jpeg);
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
// Now JPEG encode it, and send it on to the stream.
string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, /*exif_data=*/"");
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
interpolate->release_texture(qf.cbcr_tex);
}
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::REFRESH) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)last_frame.data();
- pkt.size = last_frame.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)last_frame.data();
+ pkt->size = last_frame.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
add_audio_or_silence(qf); // Definitely silence.
} else if (qf.type == QueuedFrame::SILENCE) {
long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 1;
- pkt.data = zero;
- pkt.size = num_samples * sizeof(int32_t);
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, pts, pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 1;
+ pkt->data = zero;
+ pkt->size = num_samples * sizeof(int32_t);
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, pts, pts);
free(zero);
}
int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
add_silence(qf.output_pts, frame_length);
} else {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 1;
- pkt.data = (uint8_t *)qf.audio.data();
- pkt.size = qf.audio.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 1;
+ pkt->data = (uint8_t *)qf.audio.data();
+ pkt->size = qf.audio.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
}
}
}
for ( ;; ) { // Termination condition within loop.
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
- int err = avcodec_receive_packet(ctx, &pkt);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->data = nullptr;
+ pkt->size = 0;
+ int err = avcodec_receive_packet(ctx, pkt.get());
if (err == 0) {
- pkt.stream_index = 1;
- pkt.flags = 0;
+ pkt->stream_index = 1;
+ pkt->flags = 0;
for (Mux *mux : muxes) {
- mux->add_packet(pkt, pkt.pts, pkt.dts);
+ mux->add_packet(*pkt, pkt->pts, pkt->dts);
}
- av_packet_unref(&pkt);
} else if (err == AVERROR(EAGAIN)) {
break;
} else {
if (ctx->codec->capabilities & AV_CODEC_CAP_DELAY) {
// Collect any delayed frames.
for ( ;; ) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
- int err = avcodec_receive_packet(ctx, &pkt);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->data = nullptr;
+ pkt->size = 0;
+ int err = avcodec_receive_packet(ctx, pkt.get());
if (err == 0) {
- pkt.stream_index = 1;
- pkt.flags = 0;
+ pkt->stream_index = 1;
+ pkt->flags = 0;
for (Mux *mux : muxes) {
- mux->add_packet(pkt, pkt.pts, pkt.dts);
+ mux->add_packet(*pkt, pkt->pts, pkt->dts);
}
- av_packet_unref(&pkt);
} else if (err == AVERROR_EOF) {
break;
} else {
*audio_pts = -1;
bool has_audio = false;
do {
- AVPacket pkt;
- unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
- &pkt, av_packet_unref);
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
- if (av_read_frame(format_ctx, &pkt) == 0) {
- if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
- audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->data = nullptr;
+ pkt->size = 0;
+ if (av_read_frame(format_ctx, pkt.get()) == 0) {
+ if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) {
+ audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base);
}
- if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
- video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
+ if (pkt->stream_index == video_stream_index && video_callback != nullptr) {
+ video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base);
}
- if (pkt.stream_index == video_stream_index && global_flags.transcode_video) {
- if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
+ if (pkt->stream_index == video_stream_index && global_flags.transcode_video) {
+ if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) {
fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
*error = true;
return AVFrameWithDeleter(nullptr);
}
- } else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) {
+ } else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) {
has_audio = true;
- if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
+ if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) {
fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
*error = true;
return AVFrameWithDeleter(nullptr);
}
- } else if (pkt.stream_index == subtitle_stream_index) {
- last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size);
+ } else if (pkt->stream_index == subtitle_stream_index) {
+ last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size);
has_last_subtitle = true;
}
} else {
AVFrameWithDeleter frame = av_frame_alloc_unique();
bool eof = false;
do {
- AVPacket pkt;
- unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
- &pkt, av_packet_unref);
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
- if (av_read_frame(format_ctx.get(), &pkt) == 0) {
- if (pkt.stream_index != stream_index) {
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->data = nullptr;
+ pkt->size = 0;
+ if (av_read_frame(format_ctx.get(), pkt.get()) == 0) {
+ if (pkt->stream_index != stream_index) {
continue;
}
- if (avcodec_send_packet(codec_ctx.get(), &pkt) < 0) {
+ if (avcodec_send_packet(codec_ctx.get(), pkt.get()) < 0) {
fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
return nullptr;
}
fprintf(stderr, "av_bsf_send_packet() failed with %d, ignoring\n", err);
}
for ( ;; ) {
- AVPacket out_pkt;
- unique_ptr<AVPacket, decltype(av_packet_unref) *> pkt_cleanup(&out_pkt, av_packet_unref);
- av_init_packet(&out_pkt);
- err = av_bsf_receive_packet(bsfctx, &out_pkt);
+ AVPacketWithDeleter out_pkt = av_packet_alloc_unique();
+ err = av_bsf_receive_packet(bsfctx, out_pkt.get());
if (err == AVERROR(EAGAIN)) {
break;
}
fprintf(stderr, "av_bsf_receive_packet() failed with %d, ignoring\n", err);
return;
}
- mux->add_packet(out_pkt, out_pkt.pts, out_pkt.dts == AV_NOPTS_VALUE ? out_pkt.pts : out_pkt.dts, timebase, stream_index);
+ mux->add_packet(*out_pkt, out_pkt->pts, out_pkt->dts == AV_NOPTS_VALUE ? out_pkt->pts : out_pkt->dts, timebase, stream_index);
}
}
return AVFrameWithDeleter(av_frame_alloc());
}
+// AVPacket
+// Deleter for AVPacketWithDeleter. The packet was heap-allocated by
+// av_packet_alloc() (see av_packet_alloc_unique() below), so we must
+// free the struct itself, not just unref its payload; av_packet_free()
+// unrefs the packet's data and side data, then frees the struct and
+// sets the pointer to nullptr. Calling only av_packet_unref() here
+// would leak one AVPacket struct per packet.
+void av_packet_free_unique::operator() (AVPacket *packet) const
+{
+	av_packet_free(&packet);
+}
+
+// Allocates a fresh, zero-initialized AVPacket and wraps it so that it
+// is released automatically when it goes out of scope.
+AVPacketWithDeleter av_packet_alloc_unique()
+{
+	return AVPacketWithDeleter(av_packet_alloc());
+}
+
+
// SwsContext
void sws_free_context_unique::operator() (SwsContext *context) const
struct AVDictionary;
struct AVFormatContext;
struct AVFrame;
struct AVInputFormat;
+struct AVPacket;
struct SwsContext;
typedef struct AVIOInterruptCB AVIOInterruptCB;
AVFrameWithDeleter av_frame_alloc_unique();
+// AVPacket (ick!)
+// Not really unique from FFmpeg's point of view, but it is from ours:
+// the smart pointer owns the AVPacket struct itself, which is
+// heap-allocated by av_packet_alloc_unique(). The deleter therefore
+// has to release both the packet's payload and the struct (see
+// av_packet_free_unique::operator() in the .cpp file).
+struct av_packet_free_unique {
+	void operator() (AVPacket *packet) const;
+};
+
+typedef std::unique_ptr<AVPacket, av_packet_free_unique>
+	AVPacketWithDeleter;
+
+// Returns a freshly allocated, automatically released AVPacket.
+AVPacketWithDeleter av_packet_alloc_unique();
+
+
// SwsContext
struct sws_free_context_unique {
void operator() (SwsContext *context) const;
assert(pts >= dts);
AVPacket pkt_copy;
- av_init_packet(&pkt_copy);
if (av_packet_ref(&pkt_copy, &pkt) < 0) {
fprintf(stderr, "av_copy_packet() failed\n");
abort();
// Make sure the header is written before the constructor exits
// (assuming we are in WRITE_FOREGROUND mode).
avio_flush(avctx->pb);
+
}
void MuxMetrics::init(const vector<pair<string, string>> &labels)