X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=h264encode.cpp;h=b329c983969e2f6696795f7afe53410ef2ff6484;hb=e45ebd282e3c6c369475ef0987945eca62ef8f58;hp=784531663c3e6f7191f1847130b3291d34ee7203;hpb=9284d85b2f5c8da5ae5555c1cf24b5af0dbcc285;p=nageru

diff --git a/h264encode.cpp b/h264encode.cpp
index 7845316..b329c98 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -38,6 +38,7 @@ extern "C" {
 
 #include "context.h"
 #include "defs.h"
+#include "flags.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -84,6 +85,8 @@ class QSurface;
 
 #define BITSTREAM_ALLOCATE_STEPPING 4096
 #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
+#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
+#define MAX_NUM_REF2 32 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
 
 static constexpr unsigned int MaxFrameNum = (2<<16);
 static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
@@ -111,14 +114,92 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
+//
+// The class also deals with keeping a freelist as needed.
+class FrameReorderer {
+public:
+	FrameReorderer(unsigned queue_length, int width, int height);
+
+	// Returns the next frame to insert with its pts, if any. Otherwise -1 and nullptr.
+	// Does _not_ take ownership of data; a copy is taken if needed.
+	// The returned pointer is valid until the next call to reorder_frame, or destruction.
+	// As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
+	pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+	// The same as reorder_frame, but without inserting anything. Used to empty the queue.
+	pair<int64_t, const uint8_t *> get_first_frame();
+
+	bool empty() const { return frames.empty(); }
+
+private:
+	unsigned queue_length;
+	int width, height;
+
+	priority_queue<pair<int64_t, uint8_t *>> frames;
+	stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
+
+	// Owns all the pointers. Normally, freelist and frames could do this themselves,
+	// except priority_queue doesn't work well with movable-only types.
+	vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+    : queue_length(queue_length), width(width), height(height)
+{
+	for (unsigned i = 0; i < queue_length; ++i) {
+		owner.emplace_back(new uint8_t[width * height * 2]);
+		freelist.push(owner.back().get());
+	}
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+	if (queue_length == 0) {
+		return make_pair(pts, data);
+	}
+
+	assert(!freelist.empty());
+	uint8_t *storage = freelist.top();
+	freelist.pop();
+	memcpy(storage, data, width * height * 2);
+	frames.emplace(-pts, storage);  // Invert pts to get smallest first.
+
+	if (frames.size() >= queue_length) {
+		return get_first_frame();
+	} else {
+		return make_pair(-1, nullptr);
+	}
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+	assert(!frames.empty());
+	pair<int64_t, uint8_t *> storage = frames.top();
+	frames.pop();
+	int64_t pts = storage.first;
+	freelist.push(storage.second);
+	return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
+}
+
 class H264EncoderImpl {
 public:
 	H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
 	~H264EncoderImpl();
 	void add_audio(int64_t pts, vector<float> audio);
 	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-	void end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames);
+	RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
 	void shutdown();
+	void open_output_file(const std::string &filename);
+	void close_output_file();
 
 private:
 	struct storage_task {
@@ -133,11 +214,20 @@ private:
 		int64_t pts;
 	};
 
+	// So we never get negative dts.
+	int64_t global_delay() const {
+		return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+	}
+
 	void encode_thread_func();
 	void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+	void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
 	void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
 	                  int frame_type, int64_t pts, int64_t dts);
 	void storage_task_thread();
+	void encode_audio(const vector<float> &audio,
+	                  int64_t audio_pts,
+	                  AVCodecContext *ctx);
 	void storage_task_enqueue(storage_task task);
 	void save_codeddata(storage_task task);
 	int render_packedsequence();
@@ -154,6 +244,7 @@ private:
 	int build_packed_slice_buffer(unsigned char **header_buffer);
 	int init_va(const string &va_display);
 	int deinit_va();
+	void enable_zerocopy_if_possible();
 	VADisplay va_open_display(const string &va_display);
 	void va_close_display(VADisplay va_dpy);
 	int setup_encode();
@@ -184,7 +275,9 @@ private:
 	QSurface *surface;
 
 	AVCodecContext *context_audio;
+	AVFrame *audio_frame = nullptr;
 	HTTPD *httpd;
+	unique_ptr<FrameReorderer> reorderer;
 
 	Display *x11_display = nullptr;
 
@@ -217,7 +310,7 @@ private:
 	VAEncPictureParameterBufferH264 pic_param;
 	VAEncSliceParameterBufferH264 slice_param;
 	VAPictureH264 CurrentCurrPic;
-	VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
+	VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
 
 	// Static quality settings.
 	static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
@@ -243,6 +336,8 @@ private:
 	int frame_height;
 	int frame_width_mbaligned;
 	int frame_height_mbaligned;
+
+	unique_ptr<Mux> file_mux;  // To local disk.
 };
 
 // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
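[Aside, not part of the patch: the FrameReorderer added above leans on std::priority_queue being a max-heap; pushing -pts makes top() yield the lowest pts, i.e. the next frame in display order. A minimal stand-alone sketch of just that trick, with frame data omitted and pts values invented:]

#include <cstdint>
#include <cstdio>
#include <queue>

int main()
{
	std::priority_queue<int64_t> frames;
	const unsigned queue_length = 2;  // Two B-frames, matching FrameReorderer's sizing.

	// Frames arrive in encoding order, as in the comment above: 0, 3, 1, 2, 6, 4, 5.
	for (int64_t pts : { 0, 3, 1, 2, 6, 4, 5 }) {
		frames.push(-pts);  // Invert pts to get smallest first.
		if (frames.size() >= queue_length) {  // Same condition as reorder_frame().
			printf("display pts %lld\n", (long long)-frames.top());
			frames.pop();
		}
	}
	while (!frames.empty()) {  // Drain, as get_first_frame() does when emptying the queue.
		printf("display pts %lld\n", (long long)-frames.top());
		frames.pop();
	}
}

[The output is pts 0 through 6, back in display order.]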
@@ -844,6 +939,16 @@ static const char *rc_to_string(int rc_mode)
     }
 }
 
+void H264EncoderImpl::enable_zerocopy_if_possible()
+{
+	if (global_flags.uncompressed_video_to_http) {
+		fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+		use_zerocopy = false;
+	} else {
+		use_zerocopy = true;
+	}
+}
+
 VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 {
 	if (va_display.empty()) {
@@ -852,7 +957,7 @@ VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 			fprintf(stderr, "error: can't connect to X server!\n");
 			return NULL;
 		}
-		use_zerocopy = true;
+		enable_zerocopy_if_possible();
 		return vaGetDisplay(x11_display);
 	} else if (va_display[0] != '/') {
 		x11_display = XOpenDisplay(va_display.c_str());
@@ -860,7 +965,7 @@ VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 			fprintf(stderr, "error: can't connect to X server!\n");
 			return NULL;
 		}
-		use_zerocopy = true;
+		enable_zerocopy_if_possible();
 		return vaGetDisplay(x11_display);
 	} else {
 		drm_fd = open(va_display.c_str(), O_RDWR);
@@ -924,7 +1029,7 @@ int H264EncoderImpl::init_va(const string &va_display)
 
     if (support_encode == 0) {
         printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
-        printf("but have one in your system, try launching Nageru with --va-display /dev/dri/card0\n");
+        printf("but have one in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
        printf("to use VA-API against DRM instead of X11.\n");
         exit(1);
     } else {
@@ -1298,7 +1403,7 @@ int H264EncoderImpl::render_picture(int frame_type, int display_frame_num, int gop_start_display_frame_num)
     CurrentCurrPic = pic_param.CurrPic;
 
     memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
-    for (i = numShortTerm; i < SURFACE_NUM; i++) {
+    for (i = numShortTerm; i < MAX_NUM_REF1; i++) {
         pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
         pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
     }
@@ -1448,7 +1553,7 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type)
         int refpiclist0_max = h264_maxref & 0xffff;
         memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
 
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1457,13 +1562,13 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type)
         int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
 
         memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264));
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
 
         memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264));
-        for (i = refpiclist1_max; i < 32; i++) {
+        for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1492,116 +1597,123 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type)
 
 void H264EncoderImpl::save_codeddata(storage_task task)
 {
-    VACodedBufferSegment *buf_list = NULL;
-    VAStatus va_status;
+	VACodedBufferSegment *buf_list = NULL;
+	VAStatus va_status;
 
-    string data;
+	string data;
 
-    const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
-
-    va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
-    CHECK_VASTATUS(va_status, "vaMapBuffer");
-    while (buf_list != NULL) {
-        data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
-        buf_list = (VACodedBufferSegment *) buf_list->next;
-    }
-    vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
+	va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+	CHECK_VASTATUS(va_status, "vaMapBuffer");
+	while (buf_list != NULL) {
+		data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+		buf_list = (VACodedBufferSegment *) buf_list->next;
+	}
+	vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    {
-        // Add video.
-        AVPacket pkt;
-        memset(&pkt, 0, sizeof(pkt));
-        pkt.buf = nullptr;
-        pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
-        pkt.size = data.size();
-        pkt.stream_index = 0;
-        if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
-            pkt.flags = AV_PKT_FLAG_KEY;
-        } else {
-            pkt.flags = 0;
-        }
-        //pkt.duration = 1;
-        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
-    }
-    // Encode and add all audio frames up to and including the pts of this video frame.
-    for ( ;; ) {
-        int64_t audio_pts;
-        vector<float> audio;
-        {
-             unique_lock<mutex> lock(frame_queue_mutex);
-             frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
-             if (storage_thread_should_quit && pending_audio_frames.empty()) return;
-             auto it = pending_audio_frames.begin();
-             if (it->first > task.pts) break;
-             audio_pts = it->first;
-             audio = move(it->second);
-             pending_audio_frames.erase(it);
-        }
+	{
+		// Add video.
+		AVPacket pkt;
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.buf = nullptr;
+		pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+		pkt.size = data.size();
+		pkt.stream_index = 0;
+		if (task.frame_type == FRAME_IDR) {
+			pkt.flags = AV_PKT_FLAG_KEY;
+		} else {
+			pkt.flags = 0;
+		}
+		//pkt.duration = 1;
+		if (file_mux) {
+			file_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+		}
+		if (!global_flags.uncompressed_video_to_http) {
+			httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+		}
+	}
+	// Encode and add all audio frames up to and including the pts of this video frame.
+	for ( ;; ) {
+		int64_t audio_pts;
+		vector<float> audio;
+		{
+			unique_lock<mutex> lock(frame_queue_mutex);
+			frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+			if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+			auto it = pending_audio_frames.begin();
+			if (it->first > task.pts) break;
+			audio_pts = it->first;
+			audio = move(it->second);
+			pending_audio_frames.erase(it);
+		}
 
-        AVFrame *frame = avcodec_alloc_frame();
-        frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_S32;
-        frame->channel_layout = AV_CH_LAYOUT_STEREO;
+		encode_audio(audio, audio_pts, context_audio);
 
-        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
-        int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
-        if (ret < 0) {
-            fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
-            exit(1);
-        }
-        for (int i = 0; i < frame->nb_samples * 2; ++i) {
-            if (audio[i] >= 1.0f) {
-                int_samples[i] = 2147483647;
-            } else if (audio[i] <= -1.0f) {
-                int_samples[i] = -2147483647;
-            } else {
-                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
-            }
-        }
+		if (audio_pts == task.pts) break;
+	}
+}
 
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = nullptr;
-        pkt.size = 0;
-        int got_output;
-        avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
-        if (got_output) {
-            pkt.stream_index = 1;
-            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
-        }
-        // TODO: Delayed frames.
-        avcodec_free_frame(&frame);
-        av_free_packet(&pkt);
-        if (audio_pts == task.pts) break;
-    }
+void H264EncoderImpl::encode_audio(
+	const vector<float> &audio,
+	int64_t audio_pts,
+	AVCodecContext *ctx)
+{
+	audio_frame->nb_samples = audio.size() / 2;
+	audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+
+	unique_ptr<float[]> planar_samples;
+	unique_ptr<int32_t[]> int_samples;
+
+	if (ctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
+		audio_frame->format = AV_SAMPLE_FMT_FLTP;
+		planar_samples.reset(new float[audio.size()]);
+		avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
+		for (int i = 0; i < audio_frame->nb_samples; ++i) {
+			planar_samples[i] = audio[i * 2 + 0];
+			planar_samples[i + audio_frame->nb_samples] = audio[i * 2 + 1];
+		}
+	} else {
+		assert(ctx->sample_fmt == AV_SAMPLE_FMT_S32);
+		int_samples.reset(new int32_t[audio.size()]);
+		int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+		if (ret < 0) {
+			fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+			exit(1);
+		}
+		for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
+			if (audio[i] >= 1.0f) {
+				int_samples[i] = 2147483647;
+			} else if (audio[i] <= -1.0f) {
+				int_samples[i] = -2147483647;
+			} else {
+				int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+			}
+		}
+	}
 
-#if 0
-        printf("\r      ");   /* return back to startpoint */
-        switch (encode_order % 4) {
-        case 0:
-            printf("|");
-            break;
-        case 1:
-            printf("/");
-            break;
-        case 2:
-            printf("-");
-            break;
-        case 3:
-            printf("\\");
-            break;
-        }
-        printf("%08lld", encode_order);
-#endif
+	AVPacket pkt;
+	av_init_packet(&pkt);
+	pkt.data = nullptr;
+	pkt.size = 0;
+	int got_output = 0;
+	avcodec_encode_audio2(context_audio, &pkt, audio_frame, &got_output);
+	if (got_output) {
+		pkt.stream_index = 1;
+		pkt.flags = AV_PKT_FLAG_KEY;
+		if (file_mux) {
+			file_mux->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+		}
+		httpd->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+	}
+	// TODO: Delayed frames.
+	av_frame_unref(audio_frame);
+	av_free_packet(&pkt);
 }
 
-// this is weird. but it seems to put a new frame onto the queue
 void H264EncoderImpl::storage_task_enqueue(storage_task task)
 {
 	unique_lock<mutex> lock(storage_task_queue_mutex);
 	storage_task_queue.push(move(task));
-	srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
 	storage_task_queue_changed.notify_all();
 }
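[Aside, not part of the patch: the FLTP branch of encode_audio() above repacks interleaved stereo into FFmpeg's planar layout, i.e. all left-channel samples followed by all right-channel samples within one contiguous buffer. The same loop in isolation, with invented sample values:]

#include <cstdio>
#include <vector>

int main()
{
	// Interleaved stereo input: L0 R0 L1 R1 L2 R2.
	std::vector<float> interleaved = { 0.1f, -0.1f, 0.2f, -0.2f, 0.3f, -0.3f };
	const int nb_samples = interleaved.size() / 2;

	// Planar output: L0 L1 L2 R0 R1 R2, the layout AV_SAMPLE_FMT_FLTP expects.
	std::vector<float> planar(interleaved.size());
	for (int i = 0; i < nb_samples; ++i) {
		planar[i] = interleaved[i * 2 + 0];               // Left plane.
		planar[i + nb_samples] = interleaved[i * 2 + 1];  // Right plane.
	}

	for (float f : planar) printf("%.1f ", f);
	printf("\n");  // Prints: 0.1 0.2 0.3 -0.1 -0.2 -0.3
}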
@@ -1665,23 +1777,54 @@ int H264EncoderImpl::deinit_va()
 	return 0;
 }
 
+namespace {
 
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
-	: current_storage_frame(0), surface(surface), httpd(httpd)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx)
 {
-	AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
-	context_audio = avcodec_alloc_context3(codec_audio);
-	context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+	AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+	if (codec_audio == nullptr) {
+		fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+		exit(1);
+	}
+
+	AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+	context_audio->bit_rate = bit_rate;
 	context_audio->sample_rate = OUTPUT_FREQUENCY;
-	context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+
+	// Choose sample format; we currently only support these two
+	// (see encode_audio), so we're a bit picky.
+	const AVSampleFormat *ptr = codec_audio->sample_fmts;
+	for ( ; *ptr != -1; ++ptr) {
+		if (*ptr == AV_SAMPLE_FMT_FLTP || *ptr == AV_SAMPLE_FMT_S32) {
+			context_audio->sample_fmt = *ptr;
+			break;
+		}
+	}
+	if (*ptr == -1) {
+		fprintf(stderr, "ERROR: Audio codec does not support fltp or s32 sample formats\n");
+		exit(1);
+	}
+
 	context_audio->channels = 2;
 	context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
 	context_audio->time_base = AVRational{1, TIMEBASE};
 	if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
-		fprintf(stderr, "Could not open codec\n");
+		fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
 		exit(1);
 	}
 
+	*ctx = context_audio;
+}
+
+}  // namespace
+
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+	: current_storage_frame(0), surface(surface), httpd(httpd)
+{
+	init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, AUDIO_OUTPUT_BIT_RATE, &context_audio);
+
+	audio_frame = av_frame_alloc();
+
 	frame_width = width;
 	frame_height = height;
 	frame_width_mbaligned = (frame_width + 15) & (~15);
@@ -1689,6 +1832,10 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
 
 	//print_input();
 
+	if (global_flags.uncompressed_video_to_http) {
+		reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+	}
+
 	init_va(va_display);
 	setup_encode();
 
@@ -1717,6 +1864,9 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
 H264EncoderImpl::~H264EncoderImpl()
 {
 	shutdown();
+	av_frame_free(&audio_frame);
+
+	// TODO: Destroy context.
 }
 
 bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
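[Aside, not part of the patch: the "(x + 15) & (~15)" in the constructor above rounds surface dimensions up to the next multiple of 16, since H.264 encodes in 16x16 macroblocks. Stand-alone, with invented resolutions:]

#include <cstdio>

int main()
{
	for (int x : { 1280, 720, 1366, 768 }) {
		int mbaligned = (x + 15) & (~15);
		printf("%4d -> %4d\n", x, mbaligned);  // 1280->1280, 720->720, 1366->1376, 768->768.
	}
}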
@@ -1725,7 +1875,12 @@ bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 	{
 		// Wait until this frame slot is done encoding.
 		unique_lock<mutex> lock(storage_task_queue_mutex);
+		if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+			fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+				current_storage_frame % SURFACE_NUM, current_storage_frame);
+		}
 		storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+		srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
 		if (storage_thread_should_quit) return false;
 	}
 
@@ -1795,7 +1950,7 @@ void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 	frame_queue_nonempty.notify_all();
 }
 
-void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264EncoderImpl::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
 	assert(!is_shutdown);
 
@@ -1825,16 +1980,20 @@ void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
 		glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
 		check_error();
-		fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
-		check_error();
 	}
 
+	RefCountedGLsync fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+	check_error();
+	glFlush();  // Make the H.264 thread see the fence as soon as possible.
+	check_error();
+
 	{
 		unique_lock<mutex> lock(frame_queue_mutex);
 		pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
 		++current_storage_frame;
 	}
 	frame_queue_nonempty.notify_all();
+	return fence;
 }
 
 void H264EncoderImpl::shutdown()
@@ -1862,6 +2021,29 @@ void H264EncoderImpl::shutdown()
 	is_shutdown = true;
 }
 
+void H264EncoderImpl::open_output_file(const std::string &filename)
+{
+	AVFormatContext *avctx = avformat_alloc_context();
+	avctx->oformat = av_guess_format(NULL, filename.c_str(), NULL);
+	assert(filename.size() < sizeof(avctx->filename) - 1);
+	strcpy(avctx->filename, filename.c_str());
+
+	string url = "file:" + filename;
+
+	int ret = avio_open2(&avctx->pb, url.c_str(), AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL);
+	if (ret < 0) {
+		char tmp[AV_ERROR_MAX_STRING_SIZE];
+		fprintf(stderr, "%s: avio_open2() failed: %s\n", filename.c_str(), av_make_error_string(tmp, sizeof(tmp), ret));
+		exit(1);
+	}
+
+	file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, TIMEBASE));
+}
+
+void H264EncoderImpl::close_output_file()
+{
+	file_mux.reset();
+}
+
 void H264EncoderImpl::encode_thread_func()
 {
 	int64_t last_dts = -1;
@@ -1926,6 +2108,26 @@ void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts)
 		encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
 		last_dts = dts;
 	}
+
+	if (global_flags.uncompressed_video_to_http) {
+		// Add frames left in reorderer.
+		while (!reorderer->empty()) {
+			pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+			add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+		}
+	}
+}
+
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
+	AVPacket pkt;
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.buf = nullptr;
+	pkt.data = const_cast<uint8_t *>(data);
+	pkt.size = frame_width * frame_height * 2;
+	pkt.stream_index = 0;
+	pkt.flags = AV_PKT_FLAG_KEY;
+	httpd->add_packet(pkt, pts, pts);
 }
 
 namespace {
@@ -1979,6 +2181,15 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type, int64_t pts, int64_t dts)
 
 		va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
 		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+		if (global_flags.uncompressed_video_to_http) {
+			// Add uncompressed video. (Note that pts == dts here.)
+			// Delay needs to match audio.
+			pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<const uint8_t *>(surf->y_ptr));
+			if (output_frame.second != nullptr) {
+				add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+			}
+		}
 	}
 
 	va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
@@ -2034,9 +2245,9 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 	return impl->begin_frame(y_tex, cbcr_tex);
 }
 
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264Encoder::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
-	impl->end_frame(fence, pts, input_frames);
+	return impl->end_frame(pts, input_frames);
 }
 
 void H264Encoder::shutdown()
@@ -2044,4 +2255,12 @@ void H264Encoder::shutdown()
 	impl->shutdown();
 }
 
-// Real class.
+void H264Encoder::open_output_file(const std::string &filename)
+{
+	impl->open_output_file(filename);
+}
+
+void H264Encoder::close_output_file()
+{
+	impl->close_output_file();
+}
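[Aside, not part of the patch: why global_delay() exists. With B-frames, the first frames of a stream would get negative dts, which muxers reject; shifting every pts and dts by (ip_period - 1) frame durations makes the smallest dts exactly zero. A stand-alone sketch; the TIMEBASE and MAX_FPS values below are invented stand-ins for the real constants in defs.h:]

#include <cstdint>
#include <cstdio>

int main()
{
	const int64_t TIMEBASE = 90000;  // Hypothetical; see defs.h for the real value.
	const int64_t MAX_FPS = 60;
	const int ip_period = 3;         // IBBP...: two B-frames between reference frames.
	const int64_t frame_dur = TIMEBASE / MAX_FPS;
	const int64_t global_delay = int64_t(ip_period - 1) * frame_dur;  // Same formula as the patch.

	// First GOP in encoding order: I0, P3, B1, B2. dts must be monotonic and
	// <= pts, which forces the I-frame's dts to start (ip_period - 1) frames early.
	struct { const char *type; int64_t pts, dts; } frames[] = {
		{ "I", 0 * frame_dur, -2 * frame_dur },
		{ "P", 3 * frame_dur, -1 * frame_dur },
		{ "B", 1 * frame_dur,  0 * frame_dur },
		{ "B", 2 * frame_dur,  1 * frame_dur },
	};
	for (const auto &f : frames) {
		printf("%s: pts=%6lld dts=%6lld   after delay: pts=%6lld dts=%6lld\n",
		       f.type, (long long)f.pts, (long long)f.dts,
		       (long long)(f.pts + global_delay), (long long)(f.dts + global_delay));
	}
}

[After the shift, the dts column reads 0, 1500, 3000, 4500; it never goes negative and stays monotonic.]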