X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=h264encode.cpp;h=3880c186c8bf52edc807badde52324fdde6f8dad;hb=b4f16ea9f8969a3ba14be8cd9c88cfe00d19533b;hp=7e34b5c79653cdd013353361f1ded2cc581cb715;hpb=bc7862c72514f91de51c547b2efe26c90b9631f9;p=nageru

diff --git a/h264encode.cpp b/h264encode.cpp
index 7e34b5c..3880c18 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -26,6 +26,7 @@
 #include 
 #include "context.h"
+#include "httpd.h"
 #include "timebase.h"
 
 class QOpenGLContext;
 
@@ -586,11 +587,51 @@ static int build_packed_slice_buffer(unsigned char **header_buffer)
    {IDR(PBB)(PBB)}. */
 
-/*
- * Return displaying order with specified periods and encoding order
- * displaying_order: displaying order
- * frame_type: frame type
- */
+// General pts/dts strategy:
+//
+// Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
+// bit tricky. We assume first of all that the frame rate never goes _above_
+// <frame_rate>, which gives us a frame period N. The decoder can always decode
+// in at least this speed, as long as dts <= pts (a frame is never presented
+// before it is decoded). Furthermore, we never have longer chains of
+// B-frames than a fixed constant C. (In a B-frame chain, we say that the base
+// I/P-frame has order O=0, the B-frame depending on it directly has order O=1,
+// etc. The last frame in the chain, which no B-frames depend on, is the “tip”
+// frame, with an order O <= C.)
+//
+// Many strategies are possible, but we establish these rules:
+//
+//  - Tip frames have dts = pts - (C-O)*N.
+//  - Non-tip frames have dts = dts_last + N.
+//
+// An example, with C=2 and N=10 and the data flow shown with arrows:
+//
+//        I  B  P  B  B  P
+//   pts: 30 40 50 60 70 80
+//        ↓  ↓     ↓
+//   dts: 10 30 20 60 50←40
+//         |  |  ↑        ↑
+//         `--|--'        |
+//             `----------'
+//
+// To show that this also works fine with irregular spacing, let's say that
+// the third frame is delayed a bit (something earlier was dropped). Now the
+// situation looks like this:
+//
+//        I  B  P  B  B   P
+//   pts: 30 40 80 90 100 110
+//        ↓  ↓     ↓
+//   dts: 10 30 20 90 50←40
+//         |  |  ↑        ↑
+//         `--|--'        |
+//             `----------'
+//
+// The resetting on every tip frame makes sure dts never ends up lagging a lot
+// behind pts, and the subtraction of (C-O)*N makes sure dts <= pts.
+//
+// In the output of this function, if <pts_lag> is >= 0, it means to reset the
+// dts from the current pts minus <pts_lag>, while if it's -1, the frame is not
+// a tip frame and should be given a dts based on the previous one.
 #define FRAME_P 0
 #define FRAME_B 1
 #define FRAME_I 2
@@ -599,10 +640,12 @@ void encoding2display_order(
     unsigned long long encoding_order, int intra_period,
     int intra_idr_period, int ip_period,
     unsigned long long *displaying_order,
-    int *frame_type)
+    int *frame_type, int *pts_lag)
 {
     int encoding_order_gop = 0;
 
+    *pts_lag = 0;
+
     if (intra_period == 1) { /* all are I/IDR frames */
         *displaying_order = encoding_order;
         if (intra_idr_period == 0)
@@ -615,29 +658,47 @@ void encoding2display_order(
     if (intra_period == 0)
         intra_idr_period = 0;
 
-    /* new sequence like
-     * IDR PPPPP IPPPPP
-     * IDR (PBB)(PBB)(IBB)(PBB)
-     */
-    encoding_order_gop = (intra_idr_period == 0)? encoding_order:
-        (encoding_order % (intra_idr_period + ((ip_period == 1)?0:1)));
+    if (ip_period == 1) {
+        // No B-frames, sequence is like IDR PPPPP IPPPPP.
+        encoding_order_gop = (intra_idr_period == 0) ? encoding_order : (encoding_order % intra_idr_period);
+        *displaying_order = encoding_order;
+
+        if (encoding_order_gop == 0) { /* the first frame */
+            *frame_type = FRAME_IDR;
+        } else if (intra_period != 0 && /* have I frames */
+                   encoding_order_gop >= 2 &&
+                   (encoding_order_gop % intra_period == 0)) {
+            *frame_type = FRAME_I;
+        } else {
+            *frame_type = FRAME_P;
+        }
+        return;
+    }
+
+    // We have B-frames. Sequence is like IDR (PBB)(PBB)(IBB)(PBB).
+    encoding_order_gop = (intra_idr_period == 0) ? encoding_order : (encoding_order % (intra_idr_period + 1));
+    *pts_lag = -1;  // Most frames are not tip frames.
 
     if (encoding_order_gop == 0) { /* the first frame */
         *frame_type = FRAME_IDR;
         *displaying_order = encoding_order;
+        // IDR frames are a special case; I honestly can't find the logic behind
+        // why this is the right thing, but it seems to line up nicely in practice :-)
+        *pts_lag = TIMEBASE / frame_rate;
     } else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
-        *frame_type = FRAME_B;
+        *frame_type = FRAME_B;
         *displaying_order = encoding_order - 1;
-    } else if ((intra_period != 0) && /* have I frames */
-               (encoding_order_gop >= 2) &&
-               ((ip_period == 1 && encoding_order_gop % intra_period == 0) || /* for IDR PPPPP IPPPP */
-                /* for IDR (PBB)(PBB)(IBB) */
-                (ip_period >= 2 && ((encoding_order_gop - 1) / ip_period % (intra_period / ip_period)) == 0))) {
-        *frame_type = FRAME_I;
-        *displaying_order = encoding_order + ip_period - 1;
+        if ((encoding_order_gop % ip_period) == 0) {
+            *pts_lag = 0;  // Last B-frame.
+        }
+    } else if (intra_period != 0 && /* have I frames */
+               encoding_order_gop >= 2 &&
+               ((encoding_order_gop - 1) / ip_period % (intra_period / ip_period)) == 0) {
+        *frame_type = FRAME_I;
+        *displaying_order = encoding_order + ip_period - 1;
     } else {
-        *frame_type = FRAME_P;
-        *displaying_order = encoding_order + ip_period - 1;
+        *frame_type = FRAME_P;
+        *displaying_order = encoding_order + ip_period - 1;
     }
 }
 
@@ -1578,6 +1639,8 @@ int H264Encoder::save_codeddata(storage_task task)
 
     string data;
 
+    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate);  // So we never get negative dts.
+
     va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
     CHECK_VASTATUS(va_status, "vaMapBuffer");
     while (buf_list != NULL) {
@@ -1588,23 +1651,13 @@ int H264Encoder::save_codeddata(storage_task task)
     }
     vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    const int64_t pts_dts_delay = (ip_period - 1) * (TIMEBASE / frame_rate);
-    const int64_t av_delay = TIMEBASE / 30;  // Corresponds to the fixed delay in resampler.h. TODO: Make less hard-coded.
    {
-        int64_t pts, dts;
-        {
-            unique_lock<mutex> lock(frame_queue_mutex);
-            assert(timestamps.count(task.display_order));
-            assert(timestamps.count(task.encode_order));
-            pts = timestamps[task.display_order];
-            dts = timestamps[task.encode_order];
-        }
         // Add video.
         AVPacket pkt;
         memset(&pkt, 0, sizeof(pkt));
         pkt.buf = nullptr;
-        pkt.pts = av_rescale_q(pts + av_delay + pts_dts_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
-        pkt.dts = av_rescale_q(dts + av_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
+        pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
+        pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
         pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
         pkt.size = data.size();
         pkt.stream_index = 0;
@@ -1614,26 +1667,22 @@ int H264Encoder::save_codeddata(storage_task task)
             pkt.flags = 0;
         }
         //pkt.duration = 1;
+        httpd->add_packet(pkt);
         av_interleaved_write_frame(avctx, &pkt);
     }
     // Encode and add all audio frames up to and including the pts of this video frame.
     // (They can never be queued to us after the video frame they belong to, only before.)
     for ( ;; ) {
-        int display_order;
-        int64_t pts;
+        int64_t audio_pts;
         std::vector<float> audio;
         {
             unique_lock<mutex> lock(frame_queue_mutex);
             if (pending_audio_frames.empty()) break;
             auto it = pending_audio_frames.begin();
-            if (it->first > int(task.display_order)) break;
-            display_order = it->first;
+            if (it->first > task.pts) break;
+            audio_pts = it->first;
             audio = move(it->second);
             pending_audio_frames.erase(it);
-
-            auto pts_it = timestamps.find(display_order);
-            assert(pts_it != timestamps.end());
-            pts = pts_it->second;
         }
         AVFrame *frame = avcodec_alloc_frame();
         frame->nb_samples = audio.size() / 2;
@@ -1654,18 +1703,15 @@ int H264Encoder::save_codeddata(storage_task task)
         int got_output;
         avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
         if (got_output) {
-            pkt.pts = av_rescale_q(pts + pts_dts_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+            pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
             pkt.dts = pkt.pts;
             pkt.stream_index = 1;
+            httpd->add_packet(pkt);
             av_interleaved_write_frame(avctx, &pkt);
         }
         // TODO: Delayed frames.
         avcodec_free_frame(&frame);
     }
-    {
-        unique_lock<mutex> lock(frame_queue_mutex);
-        timestamps.erase(task.encode_order - (ip_period - 1));
-    }
 
 #if 0
     printf("\r "); /* return back to startpoint */
@@ -1777,11 +1823,10 @@ static int print_input()
 
 
 //H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename)
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename)
-    : current_storage_frame(0), surface(surface)
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd)
+    : current_storage_frame(0), surface(surface), httpd(httpd)
     //: width(width), height(height), current_encoding_frame(0)
 {
-    av_register_all();
     avctx = avformat_alloc_context();
     avctx->oformat = av_guess_format(NULL, output_filename, NULL);
     strcpy(avctx->filename, output_filename);
@@ -1946,13 +1991,21 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
     return true;
 }
 
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, std::vector<float> audio, const std::vector<RefCountedFrame> &input_frames)
+void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
 {
     {
         unique_lock<mutex> lock(frame_queue_mutex);
-        pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames };
-        pending_audio_frames[current_storage_frame] = move(audio);
-        timestamps[current_storage_frame] = pts;
+        pending_audio_frames[pts] = move(audio);
+    }
+    frame_queue_nonempty.notify_one();
+}
+
+
+void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+{
+    {
+        unique_lock<mutex> lock(frame_queue_mutex);
+        pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
         ++current_storage_frame;
     }
     frame_queue_nonempty.notify_one();
@@ -1960,10 +2013,12 @@ void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, std::vector
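
The pts/dts rules in the big comment added by this patch are easiest to see in running code. Below is a minimal standalone sketch, not part of the patch: the EncodedFrame struct, the frame list and the hand-picked pts_lag values are illustrative assumptions rather than output of encoding2display_order(), chosen so that the result reproduces the first diagram in the comment.

#include <cstdint>
#include <cstdio>
#include <vector>

struct EncodedFrame {
    int64_t pts;
    int64_t pts_lag;  // >= 0 for a tip frame, -1 otherwise (mirrors the pts_lag output above).
};

int main()
{
    const int64_t N = 10;  // Frame period; TIMEBASE / frame_rate in the real code.

    // Frames in encode order, matching the comment's first example.
    std::vector<EncodedFrame> frames = {
        { 30, 20 },  // I, tip:  dts = 30 - 20 = 10
        { 50, -1 },  // P:       dts = 10 + N  = 20
        { 40, 10 },  // B, tip:  dts = 40 - 10 = 30
        { 80, -1 },  // P:       dts = 30 + N  = 40
        { 70, -1 },  // B:       dts = 40 + N  = 50
        { 60,  0 },  // B, tip:  dts = 60 - 0  = 60
    };

    int64_t last_dts = 0;
    for (const EncodedFrame &f : frames) {
        // Tip frames reset dts from pts; everyone else just advances by one frame period.
        const int64_t dts = (f.pts_lag >= 0) ? f.pts - f.pts_lag : last_dts + N;
        std::printf("pts=%3lld  dts=%3lld\n", (long long)f.pts, (long long)dts);
        last_dts = dts;
    }
    return 0;
}

The printed dts column (10, 20, 30, 40, 50, 60) is strictly increasing in encode order and never exceeds the matching pts, which is exactly the invariant the comment is after.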
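
A related detail is the global_delay constant that save_codeddata() now adds to every pts and dts before muxing: because a tip frame's dts may sit up to (ip_period - 1) frame periods before its pts under the (C-O)*N rule, the very first frames would otherwise get negative dts (the patch's own comment: "So we never get negative dts."). A small sketch of the arithmetic; TIMEBASE, frame_rate and ip_period here are example values only, not the ones nageru actually uses.

#include <cstdint>
#include <cstdio>

int main()
{
    // Example values only; the real TIMEBASE comes from timebase.h and the
    // frame_rate/ip_period values from the encoder configuration.
    const int64_t TIMEBASE = 90000;
    const int64_t frame_rate = 60;
    const int64_t ip_period = 3;  // up to two B-frames between I/P frames

    const int64_t N = TIMEBASE / frame_rate;            // frame period
    const int64_t global_delay = (ip_period - 1) * N;   // added to both pts and dts

    // First IDR frame: pts = 0. With the tip-frame rule its dts can be as low
    // as pts - (ip_period - 1) * N, i.e. negative unless everything is shifted.
    const int64_t pts = 0;
    const int64_t dts = pts - (ip_period - 1) * N;

    std::printf("unshifted: pts=%lld dts=%lld\n", (long long)pts, (long long)dts);
    std::printf("shifted:   pts=%lld dts=%lld\n",
                (long long)(pts + global_delay), (long long)(dts + global_delay));
    return 0;
}

Since the same constant is added to video and audio packets alike, relative timing is unchanged; the shift only keeps dts non-negative.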