#include <assert.h>
+#include <algorithm>
#include <mutex>
#include <string>
#include <vector>
using namespace std;
-Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const AVCodec *codec_audio, int time_base, int bit_rate, KeyFrameSignalReceiver *keyframe_signal_receiver)
+// Strict-weak-ordering comparator for AVPackets belonging to streams of <ctx>.
+// Orders primarily by dts — falling back to pts when dts is AV_NOPTS_VALUE —
+// and rescales each packet's timestamp through its own stream's time_base via
+// av_compare_ts, so packets from different streams compare correctly.
+// Ties on dts are broken by pts. Used both by add_interleaved_packet() and by
+// unplug() when sorting buffered packets.
+struct PacketBefore {
+ PacketBefore(const AVFormatContext *ctx) : ctx(ctx) {}
+
+ bool operator() (const AVPacket *a, const AVPacket *b) const {
+ // Prefer dts; a packet without a dts is compared on its pts instead.
+ int64_t a_dts = (a->dts == AV_NOPTS_VALUE ? a->pts : a->dts);
+ int64_t b_dts = (b->dts == AV_NOPTS_VALUE ? b->pts : b->dts);
+ // Each stream can have a different time_base, so compare cross-timebase.
+ AVRational a_timebase = ctx->streams[a->stream_index]->time_base;
+ AVRational b_timebase = ctx->streams[b->stream_index]->time_base;
+ if (av_compare_ts(a_dts, a_timebase, b_dts, b_timebase) != 0) {
+ return av_compare_ts(a_dts, a_timebase, b_dts, b_timebase) < 0;
+ } else {
+ // dts tie: fall back to pts as the secondary key.
+ return av_compare_ts(a->pts, a_timebase, b->pts, b_timebase) < 0;
+ }
+ }
+
+ const AVFormatContext * const ctx;  // Not owned; must outlive the comparator.
+};
+
+// Mux constructor. The signature changes from taking an audio encoder +
+// bitrate (with the mux opening codecs itself) to taking pre-made codec
+// state: <video_extradata> holds the video stream's extradata blob, and
+// <audio_ctx> is an already-configured audio codec context whose parameters
+// are copied onto the new audio stream.
+// NOTE(review): this hunk appears truncated — avstream_video is used below
+// but its creation is not visible in this chunk; verify against the full file.
+Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const string &video_extradata, const AVCodecContext *audio_ctx, int time_base, KeyFrameSignalReceiver *keyframe_signal_receiver)
	: avctx(avctx), keyframe_signal_receiver(keyframe_signal_receiver)
{
AVCodec *codec_video = avcodec_find_encoder((video_codec == CODEC_H264) ? AV_CODEC_ID_H264 : AV_CODEC_ID_RAWVIDEO);
avstream_video->codec->color_range = AVCOL_RANGE_MPEG; // Full vs. limited range (output_ycbcr_format.full_range).
avstream_video->codec->chroma_sample_location = AVCHROMA_LOC_LEFT; // Chroma sample location. See chroma_offset_0[] in Mixer::subsample_chroma().
avstream_video->codec->field_order = AV_FIELD_PROGRESSIVE;
- if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
- avstream_video->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ // Install caller-supplied extradata (e.g. H.264 SPS/PPS) on the video
+ // stream; the buffer must be allocated with av_malloc so FFmpeg can free it.
+ if (!video_extradata.empty()) {
+ avstream_video->codec->extradata = (uint8_t *)av_malloc(video_extradata.size());
+ avstream_video->codec->extradata_size = video_extradata.size();
+ memcpy(avstream_video->codec->extradata, video_extradata.data(), video_extradata.size());
}
- avstream_audio = avformat_new_stream(avctx, codec_audio);
+ avstream_audio = avformat_new_stream(avctx, nullptr);
if (avstream_audio == nullptr) {
fprintf(stderr, "avformat_new_stream() failed\n");
exit(1);
}
avstream_audio->time_base = AVRational{1, time_base};
- avstream_audio->codec->bit_rate = bit_rate;
- avstream_audio->codec->sample_rate = OUTPUT_FREQUENCY;
- avstream_audio->codec->channels = 2;
- avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
- avstream_audio->codec->time_base = AVRational{1, time_base};
- if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
- avstream_audio->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
- }
+ // Instead of configuring the audio codec field-by-field, copy everything
+ // from the externally prepared context.
+ avcodec_copy_context(avstream_audio->codec, audio_ctx);
AVDictionary *options = NULL;
vector<pair<string, string>> opts = MUX_OPTS;
// NOTE(review): this assert(false) looks like corruption/truncation of the
// hunk tail — confirm the real constructor body against the full file.
assert(false);
}
+ // NOTE(review): the enclosing function's header is outside this hunk —
+ // presumably Mux::add_packet(); verify against the full file.
+ // Under the mutex: while the mux is plugged (plug_count > 0), buffer a
+ // clone of the packet for later; otherwise hand it straight to the
+ // interleaving queue.
+ {
+ lock_guard<mutex> lock(mu);
+ if (plug_count > 0) {
+ plugged_packets.push_back(av_packet_clone(&pkt_copy));
+ } else {
+ add_interleaved_packet(pkt_copy);
+ }
+ }
+
+ // Drop our temporary reference; clones above hold their own.
+ av_packet_unref(&pkt_copy);
+}
+
+// Hand-rolled interleaving: keeps packets of one stream queued in
+// waiting_packets until a packet of the other stream arrives, at which point
+// every queued packet that sorts before it (per PacketBefore) is flushed in
+// order. Clones are stored in the queue; writes go through
+// write_packet_with_signal().
+void Mux::add_interleaved_packet(const AVPacket &pkt)
+{
+ if (waiting_packets.empty() || waiting_packets.front()->stream_index == pkt.stream_index) {
+ // We could still get packets of the other type with earlier pts/dts,
+ // so we'll have to queue and wait.
+ waiting_packets.push(av_packet_clone(const_cast<AVPacket *>(&pkt)));
+ return;
+ }
+
+ // Flush all the queued packets that are supposed to go before this.
+ PacketBefore before(avctx);
+ while (!waiting_packets.empty() && !before(&pkt, waiting_packets.front())) {
+ AVPacket *queued_pkt = waiting_packets.front();
+ waiting_packets.pop();
+ write_packet_with_signal(*queued_pkt);
+ av_packet_free(&queued_pkt);
+ }
+
+ if (waiting_packets.empty()) {
+ // Queue fully drained: this packet becomes the new head of the wait
+ // queue (later packets of the other stream may still precede it).
+ waiting_packets.push(av_packet_clone(const_cast<AVPacket *>(&pkt)));
+ } else {
+ // Something earlier is still queued, so this packet is safe to write now.
+ write_packet_with_signal(pkt);
+ }
+}
+
+// Writes one packet to the muxer (no further interleaving — ordering is the
+// caller's job), flushing the output and signaling the keyframe receiver,
+// if any, whenever a keyframe goes out.
+void Mux::write_packet_with_signal(const AVPacket &pkt)
+{
if (keyframe_signal_receiver) {
if (pkt.flags & AV_PKT_FLAG_KEY) {
// av_write_frame(ctx, nullptr) flushes the muxer's internal buffers,
// so the signal fires only after all pre-keyframe data is out.
av_write_frame(avctx, nullptr);
keyframe_signal_receiver->signal_keyframe();
}
}
+ if (av_write_frame(avctx, const_cast<AVPacket *>(&pkt)) < 0) {
+ // Fixed: message previously blamed av_interleaved_write_frame(),
+ // but the call above is av_write_frame().
+ fprintf(stderr, "av_write_frame() failed\n");
+ exit(1);
+ }
+ avio_flush(avctx->pb);
+}
- {
- lock_guard<mutex> lock(ctx_mu);
- if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
- fprintf(stderr, "av_interleaved_write_frame() failed\n");
- exit(1);
- }
+// Begin buffering: while plug_count > 0, incoming packets are stashed in
+// plugged_packets instead of being written. Nestable; each plug() must be
+// matched by an unplug().
+void Mux::plug()
+{
+ lock_guard<mutex> lock(mu);
+ ++plug_count;
+}
+
+// Undo one plug(). Only when the outermost unplug() brings the count to zero
+// are the buffered packets sorted into timestamp order (PacketBefore) and
+// released through the interleaving path.
+void Mux::unplug()
+{
+ lock_guard<mutex> lock(mu);
+ if (--plug_count > 0) {
+ return;
}
+ // Catches an unplug() without a matching plug() (count would go negative).
+ assert(plug_count >= 0);
- av_packet_unref(&pkt_copy);
+ sort(plugged_packets.begin(), plugged_packets.end(), PacketBefore(avctx));
+
+ for (AVPacket *pkt : plugged_packets) {
+ add_interleaved_packet(*pkt);
+ av_packet_free(&pkt);
+ }
+ plugged_packets.clear();
}