- There is a critical bug fix with x264 speed control in
928bd9d5def4f0ca5071ea176a11b816a01e6495, pushed to git mid-June 2016.
+ - Nageru depends on an avformat API for marking block boundaries in the
+ muxed byte stream that didn't enter ffmpeg before
+ 4e7a9212820a56bc731c09b2f11ae1422d070837, pushed to git late June 2016.
+
The patches/ directory contains a patch that helps zita-resampler performance.
It is meant for upstream, but was not in at the time Nageru was released.
const AVFormatContext * const ctx;
};
-Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const string &video_extradata, const AVCodecContext *audio_ctx, int time_base, KeyFrameSignalReceiver *keyframe_signal_receiver)
- : avctx(avctx), keyframe_signal_receiver(keyframe_signal_receiver)
+Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const string &video_extradata, const AVCodecContext *audio_ctx, int time_base)
+ : avctx(avctx)
{
AVCodec *codec_video = avcodec_find_encoder((video_codec == CODEC_H264) ? AV_CODEC_ID_H264 : AV_CODEC_ID_RAWVIDEO);
avstream_video = avformat_new_stream(avctx, codec_video);
if (plug_count > 0) {
plugged_packets.push_back(av_packet_clone(&pkt_copy));
} else {
- add_interleaved_packet(pkt_copy);
+ write_packet_or_die(pkt_copy);
}
}
av_packet_unref(&pkt_copy);
}
-void Mux::add_interleaved_packet(const AVPacket &pkt)
+void Mux::write_packet_or_die(const AVPacket &pkt)
{
- if (waiting_packets.empty() || waiting_packets.front()->stream_index == pkt.stream_index) {
- // We could still get packets of the other type with earlier pts/dts,
- // so we'll have to queue and wait.
- waiting_packets.push(av_packet_clone(const_cast<AVPacket *>(&pkt)));
- return;
- }
-
- // Flush all the queued packets that are supposed to go before this.
- PacketBefore before(avctx);
- while (!waiting_packets.empty() && !before(&pkt, waiting_packets.front())) {
- AVPacket *queued_pkt = waiting_packets.front();
- waiting_packets.pop();
- write_packet_with_signal(*queued_pkt);
- av_packet_free(&queued_pkt);
- }
-
- if (waiting_packets.empty()) {
- waiting_packets.push(av_packet_clone(const_cast<AVPacket *>(&pkt)));
- } else {
- write_packet_with_signal(pkt);
- }
-}
-
-void Mux::write_packet_with_signal(const AVPacket &pkt)
-{
- if (keyframe_signal_receiver) {
- if (pkt.flags & AV_PKT_FLAG_KEY) {
- av_write_frame(avctx, nullptr);
- keyframe_signal_receiver->signal_keyframe();
- }
- }
- if (av_write_frame(avctx, const_cast<AVPacket *>(&pkt)) < 0) {
+ if (av_interleaved_write_frame(avctx, const_cast<AVPacket *>(&pkt)) < 0) {
fprintf(stderr, "av_interleaved_write_frame() failed\n");
exit(1);
}
sort(plugged_packets.begin(), plugged_packets.end(), PacketBefore(avctx));
for (AVPacket *pkt : plugged_packets) {
- add_interleaved_packet(*pkt);
+ write_packet_or_die(*pkt);
av_packet_free(&pkt);
}
plugged_packets.clear();
#include <queue>
#include <vector>
-class KeyFrameSignalReceiver {
-public:
- // Needs to automatically turn the flag off again after actually receiving data.
- virtual void signal_keyframe() = 0;
-};
-
class Mux {
public:
enum Codec {
};
-	// Takes ownership of avctx. <keyframe_signal_receiver> can be nullptr.
+	// Takes ownership of avctx.
- Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const std::string &video_extradata, const AVCodecContext *audio_ctx, int time_base, KeyFrameSignalReceiver *keyframe_signal_receiver);
+ Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, const std::string &video_extradata, const AVCodecContext *audio_ctx, int time_base);
~Mux();
void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
void unplug();
private:
- void add_interleaved_packet(const AVPacket &pkt); // Must be called with <mu> held.
- void write_packet_with_signal(const AVPacket &pkt); // Must be called with <mu> held.
+ void write_packet_or_die(const AVPacket &pkt); // Must be called with <mu> held.
std::mutex mu;
AVFormatContext *avctx; // Protected by <mu>.
int plug_count = 0; // Protected by <mu>.
std::vector<AVPacket *> plugged_packets; // Protected by <mu>.
- // We need to do our own interleaving since we do explicit flushes
- // before each keyframe. This queue contains every packet that we
- // couldn't send yet, in add order. Essentially, we can't send a packet
- // before we know we cannot receive an earlier (dts-wise) packet
- // from another stream. This means that this queue will either contain
- // video packets only or audio packets only, and as soon as a packet
- // of the other type comes in, we can empty the flush the queue up
- // to that point.
- // Protected by <mu>.
- std::queue<AVPacket *> waiting_packets;
-
AVStream *avstream_video, *avstream_audio;
- KeyFrameSignalReceiver *keyframe_signal_receiver;
};
#endif // !defined(_MUX_H)
}
string video_extradata = ""; // FIXME: See other comment about global headers.
- file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, file_audio_encoder->get_ctx(), TIMEBASE, nullptr));
+ file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, file_audio_encoder->get_ctx(), TIMEBASE));
}
void QuickSyncEncoderImpl::encode_thread_func()
avctx->oformat = oformat;
uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
- avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, &VideoEncoder::write_packet_thunk, nullptr);
+ avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+ avctx->pb->write_data_type = &VideoEncoder::write_packet2_thunk;
+ avctx->pb->ignore_boundary_point = 1;
Mux::Codec video_codec;
if (global_flags.uncompressed_video_to_http) {
}
int time_base = global_flags.stream_coarse_timebase ? COARSE_TIMEBASE : TIMEBASE;
- stream_mux_writing_header = true;
- stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, stream_audio_encoder->get_ctx(), time_base, this));
- stream_mux_writing_header = false;
- httpd->set_header(stream_mux_header);
- stream_mux_header.clear();
+ stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, stream_audio_encoder->get_ctx(), time_base));
}
-int VideoEncoder::write_packet_thunk(void *opaque, uint8_t *buf, int buf_size)
+int VideoEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
VideoEncoder *video_encoder = (VideoEncoder *)opaque;
- return video_encoder->write_packet(buf, buf_size);
+ return video_encoder->write_packet2(buf, buf_size, type, time);
}
-int VideoEncoder::write_packet(uint8_t *buf, int buf_size)
+int VideoEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
- if (stream_mux_writing_header) {
+ if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
+ httpd->set_header(stream_mux_header);
} else {
- httpd->add_data((char *)buf, buf_size, stream_mux_writing_keyframes);
- stream_mux_writing_keyframes = false;
+ httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT);
}
return buf_size;
}
class ResourcePool;
} // namespace movit
-class VideoEncoder : public KeyFrameSignalReceiver {
+class VideoEncoder {
public:
VideoEncoder(movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
~VideoEncoder();
// Does a cut of the disk stream immediately ("frame" is used for the filename only).
void do_cut(int frame);
- virtual void signal_keyframe() override {
- stream_mux_writing_keyframes = true;
- }
-
private:
void open_output_stream();
- static int write_packet_thunk(void *opaque, uint8_t *buf, int buf_size);
- int write_packet(uint8_t *buf, int buf_size);
+ static int write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
+ int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
AVOutputFormat *oformat;
std::mutex qs_mu;
std::unique_ptr<AudioEncoder> stream_audio_encoder;
std::unique_ptr<X264Encoder> x264_encoder; // nullptr if not using x264.
- // While Mux object is constructing, <stream_mux_writing_header> is true,
- // and the header is being collected into stream_mux_header.
- bool stream_mux_writing_header;
std::string stream_mux_header;
- bool stream_mux_writing_keyframes = false;
-
std::atomic<int> quicksync_encoders_in_shutdown{0};
// Encoders that are shutdown, but need to call release_gl_resources()