X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=h264encode.cpp;h=b9c52eb55e00914406fc73b131eb47657f13336f;hb=8348925c4cb0d7a73b07db03c6bc6d55fa0631b8;hp=a417c30fb8fbb5dbda794816d44d9d383790bc9a;hpb=8ed6a8ebad548bc2c162100c70fa3d952fb0ce17;p=nageru

diff --git a/h264encode.cpp b/h264encode.cpp
index a417c30..b9c52eb 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -1,6 +1,7 @@
 //#include "sysdeps.h"
 #include "h264encode.h"
 
+#include 
 #include 
 #include 
 #include 
@@ -9,19 +10,24 @@
 extern "C" {
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 }
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -34,8 +40,11 @@ extern "C" {
 #include "context.h"
 #include "defs.h"
+#include "flags.h"
 #include "httpd.h"
+#include "mux.h"
 #include "timebase.h"
+#include "x264encode.h"
 
 using namespace std;
 
@@ -48,6 +57,8 @@ class QSurface;
 		exit(1);                                                                \
 	}
 
+#define BUFFER_OFFSET(i) ((char *)NULL + (i))
+
 //#include "loadsurface.h"
 
 #define NAL_REF_IDC_NONE        0
@@ -78,6 +89,8 @@ class QSurface;
 
 #define BITSTREAM_ALLOCATE_STEPPING     4096
 #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
+#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
+#define MAX_NUM_REF2 32 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
 
 static constexpr unsigned int MaxFrameNum = (2<<16);
 static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
@@ -105,32 +118,133 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
-class H264EncoderImpl {
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
+//
+// The class also deals with keeping a freelist as needed.
+class FrameReorderer {
+public:
+	FrameReorderer(unsigned queue_length, int width, int height);
+
+	// Returns the next frame to insert with its pts, if any. Otherwise -1 and nullptr.
+	// Does _not_ take ownership of data; a copy is taken if needed.
+	// The returned pointer is valid until the next call to reorder_frame, or destruction.
+	// As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
+	pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+	// The same as reorder_frame, but without inserting anything. Used to empty the queue.
+	pair<int64_t, const uint8_t *> get_first_frame();
+
+	bool empty() const { return frames.empty(); }
+
+private:
+	unsigned queue_length;
+	int width, height;
+
+	priority_queue<pair<int64_t, uint8_t *>> frames;
+	stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
+
+	// Owns all the pointers. Normally, freelist and frames could do this themselves,
+	// except priority_queue doesn't work well with movable-only types.
+	vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+	: queue_length(queue_length), width(width), height(height)
+{
+	for (unsigned i = 0; i < queue_length; ++i) {
+		owner.emplace_back(new uint8_t[width * height * 2]);
+		freelist.push(owner.back().get());
+	}
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+	if (queue_length == 0) {
+		return make_pair(pts, data);
+	}
+
+	assert(!freelist.empty());
+	uint8_t *storage = freelist.top();
+	freelist.pop();
+	memcpy(storage, data, width * height * 2);
+	frames.emplace(-pts, storage);  // Invert pts to get smallest first.
+
+	if (frames.size() >= queue_length) {
+		return get_first_frame();
+	} else {
+		return make_pair(-1, nullptr);
+	}
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+	assert(!frames.empty());
+	pair<int64_t, uint8_t *> storage = frames.top();
+	frames.pop();
+	int64_t pts = storage.first;
+	freelist.push(storage.second);
+	return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
+}
+
+class H264EncoderImpl : public KeyFrameSignalReceiver {
 public:
-	H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd);
+	H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
 	~H264EncoderImpl();
-	void add_audio(int64_t pts, std::vector<float> audio);  // Needs to come before end_frame() of same pts.
+	void add_audio(int64_t pts, vector<float> audio);
 	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-	void end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames);
+	RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
+	void shutdown();
+	void open_output_file(const std::string &filename);
+	void close_output_file();
+
+	virtual void signal_keyframe() override {
+		stream_mux_writing_keyframes = true;
+	}
 
 private:
 	struct storage_task {
 		unsigned long long display_order;
 		int frame_type;
-		std::vector<float> audio;
+		vector<float> audio;
 		int64_t pts, dts;
 	};
 	struct PendingFrame {
 		RefCountedGLsync fence;
-		std::vector<RefCountedFrame> input_frames;
+		vector<RefCountedFrame> input_frames;
 		int64_t pts;
 	};
 
+	// So we never get negative dts.
+	int64_t global_delay() const {
+		return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+	}
+
 	void encode_thread_func();
 	void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+	void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
 	void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
 	                  int frame_type, int64_t pts, int64_t dts);
 	void storage_task_thread();
+	void encode_audio(const vector<float> &audio,
+	                  vector<float> *audio_queue,
+	                  int64_t audio_pts,
+	                  AVCodecContext *ctx,
+	                  AVAudioResampleContext *resampler,
+	                  const vector<Mux *> &muxes);
+	void encode_audio_one_frame(const float *audio,
+	                            size_t num_samples,  // In each channel.
+	                            int64_t audio_pts,
+	                            AVCodecContext *ctx,
+	                            AVAudioResampleContext *resampler,
+	                            const vector<Mux *> &muxes);
 	void storage_task_enqueue(storage_task task);
 	void save_codeddata(storage_task task);
 	int render_packedsequence();
@@ -145,38 +259,57 @@ private:
 	void slice_header(bitstream *bs);
 	int build_packed_seq_buffer(unsigned char **header_buffer);
 	int build_packed_slice_buffer(unsigned char **header_buffer);
-	int init_va();
+	int init_va(const string &va_display);
 	int deinit_va();
-	VADisplay va_open_display(void);
+	void enable_zerocopy_if_possible();
+	VADisplay va_open_display(const string &va_display);
 	void va_close_display(VADisplay va_dpy);
 	int setup_encode();
 	int release_encode();
 	void update_ReferenceFrames(int frame_type);
 	int update_RefPicList(int frame_type);
+	void open_output_stream();
+	void close_output_stream();
+	static int write_packet_thunk(void *opaque, uint8_t *buf, int buf_size);
+	int write_packet(uint8_t *buf, int buf_size);
+
+	bool is_shutdown = false;
+	bool use_zerocopy;
+	int drm_fd = -1;
 
-	std::thread encode_thread, storage_thread;
+	thread encode_thread, storage_thread;
 
-	std::mutex storage_task_queue_mutex;
-	std::condition_variable storage_task_queue_changed;
+	mutex storage_task_queue_mutex;
+	condition_variable storage_task_queue_changed;
 	int srcsurface_status[SURFACE_NUM];  // protected by storage_task_queue_mutex
-	std::queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
+	queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
 	bool storage_thread_should_quit = false;  // protected by storage_task_queue_mutex
 
-	std::mutex frame_queue_mutex;
-	std::condition_variable frame_queue_nonempty;
+	mutex frame_queue_mutex;
+	condition_variable frame_queue_nonempty;
 	bool encode_thread_should_quit = false;  // under frame_queue_mutex
 
 	int current_storage_frame;
 
-	std::map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
-	std::map<int64_t, std::vector<float>> pending_audio_frames;  // under frame_queue_mutex
+	map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
+	map<int64_t, vector<float>> pending_audio_frames;  // under frame_queue_mutex
 	QSurface *surface;
 
-	AVCodecContext *context_audio;
+	AVCodecContext *context_audio_file;
+	AVCodecContext *context_audio_stream = nullptr;  // nullptr = don't code separate audio for stream.
+
+	AVAudioResampleContext *resampler_audio_file = nullptr;
+	AVAudioResampleContext *resampler_audio_stream = nullptr;
+
+	vector<float> audio_queue_file;
+	vector<float> audio_queue_stream;
+
+	AVFrame *audio_frame = nullptr;
 	HTTPD *httpd;
+	unique_ptr<FrameReorderer> reorderer;
+	unique_ptr<X264Encoder> x264_encoder;  // nullptr if not using x264.
 
-	Display *x11_display;
-	Window x11_window;
+	Display *x11_display = nullptr;
 
 	// Encoder parameters
 	VADisplay va_dpy;
@@ -190,7 +323,14 @@ private:
 		VAImage surface_image;
 		GLuint y_tex, cbcr_tex;
+
+		// Only if use_zerocopy == true.
 		EGLImage y_egl_image, cbcr_egl_image;
+
+		// Only if use_zerocopy == false.
+		GLuint pbo;
+		uint8_t *y_ptr, *cbcr_ptr;
+		size_t y_offset, cbcr_offset;
 	};
 	GLSurface gl_surfaces[SURFACE_NUM];
@@ -200,7 +340,7 @@ private:
 	VAEncPictureParameterBufferH264 pic_param;
 	VAEncSliceParameterBufferH264 slice_param;
 	VAPictureH264 CurrentCurrPic;
-	VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
+	VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
 
 	// Static quality settings.
 	static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
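// As an aside, a minimal, self-contained sketch (not from this diff) of the
// negated-pts trick that FrameReorderer above relies on: std::priority_queue
// is a max-heap, so pushing -pts makes top() yield the frame with the
// *smallest* pts first. All names below are illustrative only.
#include <cinttypes>
#include <cstdio>
#include <queue>
#include <utility>

int main() {
	std::priority_queue<std::pair<int64_t, const char *>> q;
	q.emplace(-30, "frame C");  // Inserted in encoding order...
	q.emplace(-10, "frame A");
	q.emplace(-20, "frame B");
	while (!q.empty()) {
		// ...but popped in presentation order: A, B, C.
		printf("pts %" PRId64 " = %s\n", -q.top().first, q.top().second);
		q.pop();
	}
}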
@@ -226,8 +366,17 @@ private:
 	int frame_height;
 	int frame_width_mbaligned;
 	int frame_height_mbaligned;
-};
 
+	unique_ptr<Mux> stream_mux;  // To HTTP.
+	unique_ptr<Mux> file_mux;  // To local disk.
+
+	// While the Mux object is constructing, stream_mux_writing_header is true,
+	// and the header is being collected into stream_mux_header.
+	bool stream_mux_writing_header;
+	string stream_mux_header;
+
+	bool stream_mux_writing_keyframes = false;
+};
 
 // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
 // but if we don't delete it here, we get leaks. The GStreamer implementation
@@ -290,7 +439,11 @@ bitstream_put_ui(bitstream *bs, unsigned int val, int size_in_bits)
 		bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val);
 	} else {
 		size_in_bits -= bit_left;
-		bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+		if (bit_left >= 32) {
+			bs->buffer[pos] = (val >> size_in_bits);
+		} else {
+			bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+		}
 		bs->buffer[pos] = va_swap32(bs->buffer[pos]);
 
 		if (pos + 1 == bs->max_size_in_dword) {
@@ -824,174 +977,60 @@ static const char *rc_to_string(int rc_mode)
 	}
 }
 
-#if 0
-static int process_cmdline(int argc, char *argv[])
-{
-    char c;
-    const struct option long_opts[] = {
-        {"help", no_argument, NULL, 0 },
-        {"bitrate", required_argument, NULL, 1 },
-        {"minqp", required_argument, NULL, 2 },
-        {"initialqp", required_argument, NULL, 3 },
-        {"intra_period", required_argument, NULL, 4 },
-        {"idr_period", required_argument, NULL, 5 },
-        {"ip_period", required_argument, NULL, 6 },
-        {"rcmode", required_argument, NULL, 7 },
-        {"srcyuv", required_argument, NULL, 9 },
-        {"recyuv", required_argument, NULL, 10 },
-        {"fourcc", required_argument, NULL, 11 },
-        {"syncmode", no_argument, NULL, 12 },
-        {"enablePSNR", no_argument, NULL, 13 },
-        {"prit", required_argument, NULL, 14 },
-        {"priv", required_argument, NULL, 15 },
-        {"framecount", required_argument, NULL, 16 },
-        {"entropy", required_argument, NULL, 17 },
-        {"profile", required_argument, NULL, 18 },
-        {NULL, no_argument, NULL, 0 }};
-    int long_index;
-
-    while ((c = getopt_long_only(argc, argv, "w:h:n:f:o:?", long_opts, &long_index)) != EOF) {
-        switch (c) {
-        case 'w':
-            frame_width = atoi(optarg);
-            break;
-        case 'h':
-            frame_height = atoi(optarg);
-            break;
-        case 'n':
-        case 'f':
-            frame_rate = atoi(optarg);
-            break;
-        case 'o':
-            coded_fn = strdup(optarg);
-            break;
-        case 0:
-            print_help();
-            exit(0);
-        case 1:
-            frame_bitrate = atoi(optarg);
-            break;
-        case 2:
-            minimal_qp = atoi(optarg);
-            break;
-        case 3:
-            initial_qp = atoi(optarg);
-            break;
-        case 4:
-            intra_period = atoi(optarg);
-            break;
-        case 5:
-            intra_idr_period = atoi(optarg);
-            break;
-        case 6:
-            ip_period = atoi(optarg);
-            break;
-        case 7:
-            rc_mode = string_to_rc(optarg);
-            if (rc_mode < 0) {
-                print_help();
-                exit(1);
-            }
-            break;
-        case 9:
-            srcyuv_fn = strdup(optarg);
-            break;
-        case 11:
-            srcyuv_fourcc = string_to_fourcc(optarg);
-            if (srcyuv_fourcc <= 0) {
-                print_help();
-                exit(1);
-            }
-            break;
-        case 13:
-            calc_psnr = 1;
-            break;
-        case 17:
-            h264_entropy_mode = atoi(optarg) ? 
1: 0; - break; - case 18: - if (strncmp(optarg, "BP", 2) == 0) - h264_profile = VAProfileH264Baseline; - else if (strncmp(optarg, "MP", 2) == 0) - h264_profile = VAProfileH264Main; - else if (strncmp(optarg, "HP", 2) == 0) - h264_profile = VAProfileH264High; - else - h264_profile = (VAProfile)0; - break; - case ':': - case '?': - print_help(); - exit(0); - } - } - - if (ip_period < 1) { - printf(" ip_period must be greater than 0\n"); - exit(0); - } - if (intra_period != 1 && intra_period % ip_period != 0) { - printf(" intra_period must be a multiplier of ip_period\n"); - exit(0); - } - if (intra_period != 0 && intra_idr_period % intra_period != 0) { - printf(" intra_idr_period must be a multiplier of intra_period\n"); - exit(0); - } - - if (frame_bitrate == 0) - frame_bitrate = frame_width * frame_height * 12 * MAX_FPS / 50; - - if (coded_fn == NULL) { - struct stat buf; - if (stat("/tmp", &buf) == 0) - coded_fn = strdup("/tmp/test.264"); - else if (stat("/sdcard", &buf) == 0) - coded_fn = strdup("/sdcard/test.264"); - else - coded_fn = strdup("./test.264"); - } - - - frame_width_mbaligned = (frame_width + 15) & (~15); - frame_height_mbaligned = (frame_height + 15) & (~15); - if (frame_width != frame_width_mbaligned || - frame_height != frame_height_mbaligned) { - printf("Source frame is %dx%d and will code clip to %dx%d with crop\n", - frame_width, frame_height, - frame_width_mbaligned, frame_height_mbaligned - ); - } - - return 0; +void H264EncoderImpl::enable_zerocopy_if_possible() +{ + if (global_flags.uncompressed_video_to_http) { + fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n"); + use_zerocopy = false; + } else if (global_flags.x264_video_to_http) { + fprintf(stderr, "Disabling zerocopy H.264 encoding due to --x264_video_to_http.\n"); + use_zerocopy = false; + } else { + use_zerocopy = true; + } } -#endif -VADisplay H264EncoderImpl::va_open_display(void) +VADisplay H264EncoderImpl::va_open_display(const string &va_display) { - x11_display = XOpenDisplay(NULL); - if (!x11_display) { - fprintf(stderr, "error: can't connect to X server!\n"); - return NULL; - } - return vaGetDisplay(x11_display); + if (va_display.empty()) { + x11_display = XOpenDisplay(NULL); + if (!x11_display) { + fprintf(stderr, "error: can't connect to X server!\n"); + return NULL; + } + enable_zerocopy_if_possible(); + return vaGetDisplay(x11_display); + } else if (va_display[0] != '/') { + x11_display = XOpenDisplay(va_display.c_str()); + if (!x11_display) { + fprintf(stderr, "error: can't connect to X server!\n"); + return NULL; + } + enable_zerocopy_if_possible(); + return vaGetDisplay(x11_display); + } else { + drm_fd = open(va_display.c_str(), O_RDWR); + if (drm_fd == -1) { + perror(va_display.c_str()); + return NULL; + } + use_zerocopy = false; + return vaGetDisplayDRM(drm_fd); + } } void H264EncoderImpl::va_close_display(VADisplay va_dpy) { - if (!x11_display) - return; - - if (x11_window) { - XUnmapWindow(x11_display, x11_window); - XDestroyWindow(x11_display, x11_window); - x11_window = None; - } - XCloseDisplay(x11_display); - x11_display = NULL; + if (x11_display) { + XCloseDisplay(x11_display); + x11_display = nullptr; + } + if (drm_fd != -1) { + close(drm_fd); + } } -int H264EncoderImpl::init_va() +int H264EncoderImpl::init_va(const string &va_display) { VAProfile profile_list[]={VAProfileH264High, VAProfileH264Main, VAProfileH264Baseline, VAProfileH264ConstrainedBaseline}; VAEntrypoint *entrypoints; @@ -1001,7 +1040,7 @@ int H264EncoderImpl::init_va() 
 	VAStatus va_status;
 	unsigned int i;
 
-	va_dpy = va_open_display();
+	va_dpy = va_open_display(va_display);
 	va_status = vaInitialize(va_dpy, &major_ver, &minor_ver);
 	CHECK_VASTATUS(va_status, "vaInitialize");
@@ -1030,7 +1069,9 @@ int H264EncoderImpl::init_va()
 	}
 
 	if (support_encode == 0) {
-		printf("Can't find VAEntrypointEncSlice for H264 profiles\n");
+		printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
+		printf("but have one in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
+		printf("to use VA-API against DRM instead of X11.\n");
 		exit(1);
 	} else {
 		switch (h264_profile) {
@@ -1200,6 +1241,28 @@ int H264EncoderImpl::setup_encode()
 	for (i = 0; i < SURFACE_NUM; i++) {
 		glGenTextures(1, &gl_surfaces[i].y_tex);
 		glGenTextures(1, &gl_surfaces[i].cbcr_tex);
+
+		if (!use_zerocopy) {
+			// Create Y image.
+			glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].y_tex);
+			glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, frame_width, frame_height);
+
+			// Create CbCr image.
+			glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].cbcr_tex);
+			glTexStorage2D(GL_TEXTURE_2D, 1, GL_RG8, frame_width / 2, frame_height / 2);
+
+			// Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
+			// buffers, due to potentially differing pitch.
+			glGenBuffers(1, &gl_surfaces[i].pbo);
+			glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+			glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+			uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+			gl_surfaces[i].y_offset = 0;
+			gl_surfaces[i].cbcr_offset = frame_width * frame_height;
+			gl_surfaces[i].y_ptr = ptr + gl_surfaces[i].y_offset;
+			gl_surfaces[i].cbcr_ptr = ptr + gl_surfaces[i].cbcr_offset;
+			glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+		}
 	}
 
 	for (i = 0; i < SURFACE_NUM; i++) {
@@ -1210,68 +1273,15 @@ int H264EncoderImpl::setup_encode()
 	return 0;
 }
 
-
-
-#define partition(ref, field, key, ascending)   \
-    while (i <= j) {                            \
-        if (ascending) {                        \
-            while (ref[i].field < key)          \
-                i++;                            \
-            while (ref[j].field > key)          \
-                j--;                            \
-        } else {                                \
-            while (ref[i].field > key)          \
-                i++;                            \
-            while (ref[j].field < key)          \
-                j--;                            \
-        }                                       \
-        if (i <= j) {                           \
-            tmp = ref[i];                       \
-            ref[i] = ref[j];                    \
-            ref[j] = tmp;                       \
-            i++;                                \
-            j--;                                \
-        }                                       \
-    }                                           \
-
-static void sort_one(VAPictureH264 ref[], int left, int right,
-                     int ascending, int frame_idx)
-{
-    int i = left, j = right;
-    unsigned int key;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        key = ref[(left + right) / 2].frame_idx;
-        partition(ref, frame_idx, key, ascending);
-    } else {
-        key = ref[(left + right) / 2].TopFieldOrderCnt;
-        partition(ref, TopFieldOrderCnt, (signed int)key, ascending);
-    }
-
-    /* recursion */
-    if (left < j)
-        sort_one(ref, left, j, ascending, frame_idx);
-
-    if (i < right)
-        sort_one(ref, i, right, ascending, frame_idx);
-}
-
-static void sort_two(VAPictureH264 ref[], int left, int right, unsigned int key, unsigned int frame_idx,
-                     int partition_ascending, int list0_ascending, int list1_ascending)
+// Given a list like 1 9 3 0 2 8 4 and a pivot element 3, will produce
+//
+//   2 1 0 [3] 4 8 9
+template<class T, class C>
+static void sort_two(T *begin, T *end, const T &pivot, const C &less_than)
 {
-    int i = left, j = right;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        partition(ref, frame_idx, key, partition_ascending);
-    } else {
-        partition(ref, TopFieldOrderCnt, (signed int)key, 
partition_ascending); - } - - - sort_one(ref, left, i-1, list0_ascending, frame_idx); - sort_one(ref, j+1, right, list1_ascending, frame_idx); + T *middle = partition(begin, end, [&](const T &elem) { return less_than(elem, pivot); }); + sort(begin, middle, [&](const T &a, const T &b) { return less_than(b, a); }); + sort(middle, end, less_than); } void H264EncoderImpl::update_ReferenceFrames(int frame_type) @@ -1297,21 +1307,25 @@ void H264EncoderImpl::update_ReferenceFrames(int frame_type) int H264EncoderImpl::update_RefPicList(int frame_type) { - unsigned int current_poc = CurrentCurrPic.TopFieldOrderCnt; + const auto descending_by_frame_idx = [](const VAPictureH264 &a, const VAPictureH264 &b) { + return a.frame_idx > b.frame_idx; + }; + const auto ascending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) { + return a.TopFieldOrderCnt < b.TopFieldOrderCnt; + }; + const auto descending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) { + return a.TopFieldOrderCnt > b.TopFieldOrderCnt; + }; if (frame_type == FRAME_P) { memcpy(RefPicList0_P, ReferenceFrames, numShortTerm * sizeof(VAPictureH264)); - sort_one(RefPicList0_P, 0, numShortTerm-1, 0, 1); - } - - if (frame_type == FRAME_B) { + sort(&RefPicList0_P[0], &RefPicList0_P[numShortTerm], descending_by_frame_idx); + } else if (frame_type == FRAME_B) { memcpy(RefPicList0_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264)); - sort_two(RefPicList0_B, 0, numShortTerm-1, current_poc, 0, - 1, 0, 1); + sort_two(&RefPicList0_B[0], &RefPicList0_B[numShortTerm], CurrentCurrPic, ascending_by_top_field_order_cnt); memcpy(RefPicList1_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264)); - sort_two(RefPicList1_B, 0, numShortTerm-1, current_poc, 0, - 0, 1, 0); + sort_two(&RefPicList1_B[0], &RefPicList1_B[numShortTerm], CurrentCurrPic, descending_by_top_field_order_cnt); } return 0; @@ -1430,7 +1444,7 @@ int H264EncoderImpl::render_picture(int frame_type, int display_frame_num, int g CurrentCurrPic = pic_param.CurrPic; memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264)); - for (i = numShortTerm; i < SURFACE_NUM; i++) { + for (i = numShortTerm; i < MAX_NUM_REF1; i++) { pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE; pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID; } @@ -1580,7 +1594,7 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int refpiclist0_max = h264_maxref & 0xffff; memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264)); - for (i = refpiclist0_max; i < 32; i++) { + for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) { slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE; slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID; } @@ -1589,13 +1603,13 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int refpiclist1_max = (h264_maxref >> 16) & 0xffff; memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264)); - for (i = refpiclist0_max; i < 32; i++) { + for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) { slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE; slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID; } memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264)); - for (i = refpiclist1_max; i < 32; i++) { + for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) { slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE; slice_param.RefPicList1[i].flags 
= VA_PICTURE_H264_INVALID;
 	}
 
@@ -1624,116 +1638,151 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
 
 void H264EncoderImpl::save_codeddata(storage_task task)
 {
-    VACodedBufferSegment *buf_list = NULL;
-    VAStatus va_status;
+	VACodedBufferSegment *buf_list = NULL;
+	VAStatus va_status;
 
-    string data;
+	string data;
 
-    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
+	va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+	CHECK_VASTATUS(va_status, "vaMapBuffer");
+	while (buf_list != NULL) {
+		data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+		buf_list = (VACodedBufferSegment *) buf_list->next;
+	}
+	vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
-    CHECK_VASTATUS(va_status, "vaMapBuffer");
-    while (buf_list != NULL) {
-        data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
-        buf_list = (VACodedBufferSegment *) buf_list->next;
-    }
-    vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
-
-    {
-        // Add video.
-        AVPacket pkt;
-        memset(&pkt, 0, sizeof(pkt));
-        pkt.buf = nullptr;
-        pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
-        pkt.size = data.size();
-        pkt.stream_index = 0;
-        if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
-            pkt.flags = AV_PKT_FLAG_KEY;
-        } else {
-            pkt.flags = 0;
-        }
-        //pkt.duration = 1;
-        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
-    }
-    // Encode and add all audio frames up to and including the pts of this video frame.
-    for ( ;; ) {
-        int64_t audio_pts;
-        vector<float> audio;
-        {
-            unique_lock<mutex> lock(frame_queue_mutex);
-            frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
-            if (storage_thread_should_quit && pending_audio_frames.empty()) return;
-            auto it = pending_audio_frames.begin();
-            if (it->first > task.pts) break;
-            audio_pts = it->first;
-            audio = move(it->second);
-            pending_audio_frames.erase(it);
-        }
+	{
+		// Add video.
+		AVPacket pkt;
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.buf = nullptr;
+		pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+		pkt.size = data.size();
+		pkt.stream_index = 0;
+		if (task.frame_type == FRAME_IDR) {
+			pkt.flags = AV_PKT_FLAG_KEY;
+		} else {
+			pkt.flags = 0;
+		}
+		//pkt.duration = 1;
+		if (file_mux) {
+			file_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+		}
+		if (!global_flags.uncompressed_video_to_http &&
+		    !global_flags.x264_video_to_http) {
+			stream_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+		}
+	}
+	// Encode and add all audio frames up to and including the pts of this video frame.
+	for ( ;; ) {
+		int64_t audio_pts;
+		vector<float> audio;
+		{
+			unique_lock<mutex> lock(frame_queue_mutex);
+			frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+			if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+			auto it = pending_audio_frames.begin();
+			if (it->first > task.pts) break;
+			audio_pts = it->first;
+			audio = move(it->second);
+			pending_audio_frames.erase(it);
+		}
 
-        AVFrame *frame = avcodec_alloc_frame();
-        frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_S32;
-        frame->channel_layout = AV_CH_LAYOUT_STEREO;
+		if (context_audio_stream) {
+			encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, resampler_audio_file, { file_mux.get() });
+			encode_audio(audio, &audio_queue_stream, audio_pts, context_audio_stream, resampler_audio_stream, { stream_mux.get() });
+		} else {
+			encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, resampler_audio_file, { stream_mux.get(), file_mux.get() });
+		}
 
-        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
-        int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
-        if (ret < 0) {
-            fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
-            exit(1);
-        }
-        for (int i = 0; i < frame->nb_samples * 2; ++i) {
-            if (audio[i] >= 1.0f) {
-                int_samples[i] = 2147483647;
-            } else if (audio[i] <= -1.0f) {
-                int_samples[i] = -2147483647;
-            } else {
-                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
-            }
-        }
+		if (audio_pts == task.pts) break;
+	}
+}
 
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = nullptr;
-        pkt.size = 0;
-        int got_output;
-        avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
-        if (got_output) {
-            pkt.stream_index = 1;
-            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
-        }
-        // TODO: Delayed frames.
-        avcodec_free_frame(&frame);
-        av_free_packet(&pkt);
-        if (audio_pts == task.pts) break;
-    }
+void H264EncoderImpl::encode_audio(
+	const vector<float> &audio,
+	vector<float> *audio_queue,
+	int64_t audio_pts,
+	AVCodecContext *ctx,
+	AVAudioResampleContext *resampler,
+	const vector<Mux *> &muxes)
+{
+	if (ctx->frame_size == 0) {
+		// No queueing needed.
+		assert(audio_queue->empty());
+		assert(audio.size() % 2 == 0);
+		encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx, resampler, muxes);
+		return;
+	}
 
-#if 0
-        printf("\r      ");   /* return back to startpoint */
-        switch (encode_order % 4) {
-        case 0:
-            printf("|");
-            break;
-        case 1:
-            printf("/");
-            break;
-        case 2:
-            printf("-");
-            break;
-        case 3:
-            printf("\\");
-            break;
-        }
-        printf("%08lld", encode_order);
-#endif
+	int64_t sample_offset = audio_queue->size();
+
+	audio_queue->insert(audio_queue->end(), audio.begin(), audio.end());
+	size_t sample_num;
+	for (sample_num = 0;
+	     sample_num + ctx->frame_size * 2 <= audio_queue->size();
+	     sample_num += ctx->frame_size * 2) {
+		int64_t adjusted_audio_pts = audio_pts + (int64_t(sample_num) - sample_offset) * TIMEBASE / (OUTPUT_FREQUENCY * 2);
+		encode_audio_one_frame(&(*audio_queue)[sample_num],
+		                       ctx->frame_size,
+		                       adjusted_audio_pts,
+		                       ctx,
+		                       resampler,
+		                       muxes);
+	}
+	audio_queue->erase(audio_queue->begin(), audio_queue->begin() + sample_num);
 }
 
+void H264EncoderImpl::encode_audio_one_frame(
+	const float *audio,
+	size_t num_samples,
+	int64_t audio_pts,
+	AVCodecContext *ctx,
+	AVAudioResampleContext *resampler,
+	const vector<Mux *> &muxes)
+{
+	audio_frame->nb_samples = num_samples;
+	audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+	audio_frame->format = ctx->sample_fmt;
+	audio_frame->sample_rate = OUTPUT_FREQUENCY;
+
+	if (av_samples_alloc(audio_frame->data, nullptr, 2, num_samples, ctx->sample_fmt, 0) < 0) {
+		fprintf(stderr, "Could not allocate %ld samples.\n", num_samples);
+		exit(1);
+	}
+
+	if (avresample_convert(resampler, audio_frame->data, 0, num_samples,
+	                       (uint8_t **)&audio, 0, num_samples) < 0) {
+		fprintf(stderr, "Audio conversion failed.\n");
+		exit(1);
+	}
+
+	AVPacket pkt;
+	av_init_packet(&pkt);
+	pkt.data = nullptr;
+	pkt.size = 0;
+	int got_output = 0;
+	avcodec_encode_audio2(ctx, &pkt, audio_frame, &got_output);
+	if (got_output) {
+		pkt.stream_index = 1;
+		pkt.flags = 0;
+		for (Mux *mux : muxes) {
+			mux->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+		}
+	}
+
+	av_freep(&audio_frame->data[0]);
+
+	// TODO: Delayed frames.
+	av_frame_unref(audio_frame);
+	av_free_packet(&pkt);
+}
 
 // this is weird.
 // but it seems to put a new frame onto the queue
 void H264EncoderImpl::storage_task_enqueue(storage_task task)
 {
 	unique_lock<mutex> lock(storage_task_queue_mutex);
 	storage_task_queue.push(move(task));
-	srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
 	storage_task_queue_changed.notify_all();
 }
 
@@ -1767,18 +1816,25 @@ void H264EncoderImpl::storage_task_thread()
 
 int H264EncoderImpl::release_encode()
 {
-    int i;
-
-    for (i = 0; i < SURFACE_NUM; i++) {
-        vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
-    }
-
-    vaDestroyContext(va_dpy, context_id);
-    vaDestroyConfig(va_dpy, config_id);
+	for (unsigned i = 0; i < SURFACE_NUM; i++) {
+		vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
+		vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
+		vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
+
+		if (!use_zerocopy) {
+			glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+			glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+			glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+			glDeleteBuffers(1, &gl_surfaces[i].pbo);
+		}
+		glDeleteTextures(1, &gl_surfaces[i].y_tex);
+		glDeleteTextures(1, &gl_surfaces[i].cbcr_tex);
+	}
 
-    return 0;
+	vaDestroyContext(va_dpy, context_id);
+	vaDestroyConfig(va_dpy, config_id);
+
+	return 0;
 }
 
 int H264EncoderImpl::deinit_va()
@@ -1790,31 +1846,80 @@ int H264EncoderImpl::deinit_va()
 	return 0;
 }
 
+namespace {
 
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd)
-	: current_storage_frame(0), surface(surface), httpd(httpd)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx, AVAudioResampleContext **resampler)
 {
-	AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
-	context_audio = avcodec_alloc_context3(codec_audio);
-	context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+	AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+	if (codec_audio == nullptr) {
+		fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+		exit(1);
+	}
+
+	AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+	context_audio->bit_rate = bit_rate;
 	context_audio->sample_rate = OUTPUT_FREQUENCY;
-	context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+	context_audio->sample_fmt = codec_audio->sample_fmts[0];
 	context_audio->channels = 2;
 	context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
 	context_audio->time_base = AVRational{1, TIMEBASE};
+	context_audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
 	if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
-		fprintf(stderr, "Could not open codec\n");
+		fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
+		exit(1);
+	}
+
+	*ctx = context_audio;
+
+	*resampler = avresample_alloc_context();
+	if (*resampler == nullptr) {
+		fprintf(stderr, "Allocating resampler failed.\n");
+		exit(1);
+	}
+
+	av_opt_set_int(*resampler, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
+	av_opt_set_int(*resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
+	av_opt_set_int(*resampler, "in_sample_rate",     OUTPUT_FREQUENCY,    0);
+	av_opt_set_int(*resampler, "out_sample_rate",    OUTPUT_FREQUENCY,    0);
+	av_opt_set_int(*resampler, "in_sample_fmt",      AV_SAMPLE_FMT_FLT,   0);
+	av_opt_set_int(*resampler, "out_sample_fmt",     context_audio->sample_fmt, 0);
+
+	if (avresample_open(*resampler) < 0) {
+		fprintf(stderr, "Could not open resample context.\n");
 		exit(1);
 	}
+}
+
+}  // namespace
+
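// A quick, self-contained sanity check (not from this diff) of the
// adjusted_audio_pts arithmetic used in encode_audio() above. The constants
// here are illustrative stand-ins; the real TIMEBASE and OUTPUT_FREQUENCY live
// in defs.h/timebase.h. Samples are interleaved stereo, hence the "* 2" in
// both the frame stride and the denominator.
#include <cassert>
#include <cstdint>

int main() {
	const int64_t TIMEBASE = 90000, OUTPUT_FREQUENCY = 48000;  // Assumed values.
	const int64_t frame_size = 1024;      // Samples per channel per codec frame.
	int64_t audio_pts = 1000;             // pts of the start of the queued block.
	int64_t sample_offset = 0;            // Queue was empty when the block arrived.
	int64_t sample_num = frame_size * 2;  // Second frame starts 2048 floats in.
	int64_t adjusted_audio_pts = audio_pts + (sample_num - sample_offset) * TIMEBASE / (OUTPUT_FREQUENCY * 2);
	assert(adjusted_audio_pts == 1000 + 1920);  // 1024 samples = 1920 ticks at 90 kHz.
}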
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+	: current_storage_frame(0), surface(surface), httpd(httpd), frame_width(width), frame_height(height)
+{
+	init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, &context_audio_file, &resampler_audio_file);
+
+	if (!global_flags.stream_audio_codec_name.empty()) {
+		init_audio_encoder(global_flags.stream_audio_codec_name,
+			global_flags.stream_audio_codec_bitrate, &context_audio_stream, &resampler_audio_stream);
+	}
 
-	frame_width = width;
-	frame_height = height;
 	frame_width_mbaligned = (frame_width + 15) & (~15);
 	frame_height_mbaligned = (frame_height + 15) & (~15);
 
+	open_output_stream();
+
+	audio_frame = av_frame_alloc();
+
 	//print_input();
 
-	init_va();
+	if (global_flags.uncompressed_video_to_http ||
+	    global_flags.x264_video_to_http) {
+		reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+	}
+	if (global_flags.x264_video_to_http) {
+		x264_encoder.reset(new X264Encoder(stream_mux.get()));
+	}
+
+	init_va(va_display);
 	setup_encode();
 
 	// No frames are ready yet.
@@ -1841,30 +1946,27 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD
 
 H264EncoderImpl::~H264EncoderImpl()
 {
-	{
-		unique_lock<mutex> lock(frame_queue_mutex);
-		encode_thread_should_quit = true;
-		frame_queue_nonempty.notify_all();
-	}
-	encode_thread.join();
-	{
-		unique_lock<mutex> lock(storage_task_queue_mutex);
-		storage_thread_should_quit = true;
-		frame_queue_nonempty.notify_all();
-		storage_task_queue_changed.notify_all();
-	}
-	storage_thread.join();
-
-	release_encode();
-	deinit_va();
+	shutdown();
+	av_frame_free(&audio_frame);
+	avresample_free(&resampler_audio_file);
+	avresample_free(&resampler_audio_stream);
+	avcodec_free_context(&context_audio_file);
+	avcodec_free_context(&context_audio_stream);
+	close_output_stream();
 }
 
 bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 {
+	assert(!is_shutdown);
 	{
 		// Wait until this frame slot is done encoding.
 		unique_lock<mutex> lock(storage_task_queue_mutex);
+		if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+			fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+				current_storage_frame % SURFACE_NUM, current_storage_frame);
+		}
 		storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+		srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
 		if (storage_thread_should_quit) return false;
 	}
 
@@ -1873,58 +1975,60 @@ bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 	*y_tex = surf->y_tex;
 	*cbcr_tex = surf->cbcr_tex;
 
-	VASurfaceID surface = surf->src_surface;
-	VAStatus va_status = vaDeriveImage(va_dpy, surface, &surf->surface_image);
-	CHECK_VASTATUS(va_status, "vaDeriveImage");
-
-	VABufferInfo buf_info;
-	buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM?
-	va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info);
-	CHECK_VASTATUS(va_status, "vaAcquireBufferHandle");
-
-	// Create Y image.
- surf->y_egl_image = EGL_NO_IMAGE_KHR; - EGLint y_attribs[] = { - EGL_WIDTH, frame_width, - EGL_HEIGHT, frame_height, - EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '), - EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle), - EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]), - EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]), - EGL_NONE - }; - - surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs); - assert(surf->y_egl_image != EGL_NO_IMAGE_KHR); - - // Associate Y image to a texture. - glBindTexture(GL_TEXTURE_2D, *y_tex); - glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image); - - // Create CbCr image. - surf->cbcr_egl_image = EGL_NO_IMAGE_KHR; - EGLint cbcr_attribs[] = { - EGL_WIDTH, frame_width, - EGL_HEIGHT, frame_height, - EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'), - EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle), - EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]), - EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]), - EGL_NONE - }; - - surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs); - assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR); - - // Associate CbCr image to a texture. - glBindTexture(GL_TEXTURE_2D, *cbcr_tex); - glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image); + VAStatus va_status = vaDeriveImage(va_dpy, surf->src_surface, &surf->surface_image); + CHECK_VASTATUS(va_status, "vaDeriveImage"); + + if (use_zerocopy) { + VABufferInfo buf_info; + buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME; // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM? + va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info); + CHECK_VASTATUS(va_status, "vaAcquireBufferHandle"); + + // Create Y image. + surf->y_egl_image = EGL_NO_IMAGE_KHR; + EGLint y_attribs[] = { + EGL_WIDTH, frame_width, + EGL_HEIGHT, frame_height, + EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '), + EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle), + EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]), + EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]), + EGL_NONE + }; + + surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs); + assert(surf->y_egl_image != EGL_NO_IMAGE_KHR); + + // Associate Y image to a texture. + glBindTexture(GL_TEXTURE_2D, *y_tex); + glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image); + + // Create CbCr image. + surf->cbcr_egl_image = EGL_NO_IMAGE_KHR; + EGLint cbcr_attribs[] = { + EGL_WIDTH, frame_width, + EGL_HEIGHT, frame_height, + EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'), + EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle), + EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]), + EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]), + EGL_NONE + }; + + surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs); + assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR); + + // Associate CbCr image to a texture. 
+		glBindTexture(GL_TEXTURE_2D, *cbcr_tex);
+		glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
+	}
 
 	return true;
 }
 
 void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 {
+	assert(!is_shutdown);
 	{
 		unique_lock<mutex> lock(frame_queue_mutex);
 		pending_audio_frames[pts] = move(audio);
@@ -1932,14 +2036,163 @@ void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 	frame_queue_nonempty.notify_all();
 }
 
-void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264EncoderImpl::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
+	assert(!is_shutdown);
+
+	if (!use_zerocopy) {
+		GLSurface *surf = &gl_surfaces[current_storage_frame % SURFACE_NUM];
+
+		glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+		check_error();
+
+		glBindBuffer(GL_PIXEL_PACK_BUFFER, surf->pbo);
+		check_error();
+
+		glBindTexture(GL_TEXTURE_2D, surf->y_tex);
+		check_error();
+		glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->y_offset));
+		check_error();
+
+		glBindTexture(GL_TEXTURE_2D, surf->cbcr_tex);
+		check_error();
+		glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->cbcr_offset));
+		check_error();
+
+		glBindTexture(GL_TEXTURE_2D, 0);
+		check_error();
+		glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+		check_error();
+
+		glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+		check_error();
+	}
+
+	RefCountedGLsync fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+	check_error();
+	glFlush();  // Make the H.264 thread see the fence as soon as possible.
+	check_error();
+
 	{
 		unique_lock<mutex> lock(frame_queue_mutex);
 		pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
 		++current_storage_frame;
 	}
 	frame_queue_nonempty.notify_all();
+	return fence;
+}
+
+void H264EncoderImpl::shutdown()
+{
+	if (is_shutdown) {
+		return;
+	}
+
+	{
+		unique_lock<mutex> lock(frame_queue_mutex);
+		encode_thread_should_quit = true;
+		frame_queue_nonempty.notify_all();
+	}
+	encode_thread.join();
+	{
+		unique_lock<mutex> lock(storage_task_queue_mutex);
+		storage_thread_should_quit = true;
+		frame_queue_nonempty.notify_all();
+		storage_task_queue_changed.notify_all();
+	}
+	storage_thread.join();
+
+	release_encode();
+	deinit_va();
+	is_shutdown = true;
+}
+
+void H264EncoderImpl::open_output_file(const std::string &filename)
+{
+	AVFormatContext *avctx = avformat_alloc_context();
+	avctx->oformat = av_guess_format(NULL, filename.c_str(), NULL);
+	assert(filename.size() < sizeof(avctx->filename) - 1);
+	strcpy(avctx->filename, filename.c_str());
+
+	string url = "file:" + filename;
+	int ret = avio_open2(&avctx->pb, url.c_str(), AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL);
+	if (ret < 0) {
+		char tmp[AV_ERROR_MAX_STRING_SIZE];
+		fprintf(stderr, "%s: avio_open2() failed: %s\n", filename.c_str(), av_make_error_string(tmp, sizeof(tmp), ret));
+		exit(1);
+	}
+
+	file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, context_audio_file->codec, TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE, nullptr));
+}
+
+void H264EncoderImpl::close_output_file()
+{
+	file_mux.reset();
+}
+
+void H264EncoderImpl::open_output_stream()
+{
+	AVFormatContext *avctx = avformat_alloc_context();
+	AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
+	assert(oformat != nullptr);
+	avctx->oformat = oformat;
+
+	string codec_name;
+	int bit_rate;
+
+	if (global_flags.stream_audio_codec_name.empty()) {
+		codec_name = AUDIO_OUTPUT_CODEC_NAME;
+		bit_rate = DEFAULT_AUDIO_OUTPUT_BIT_RATE;
+	} else {
+		codec_name = global_flags.stream_audio_codec_name;
+		bit_rate = global_flags.stream_audio_codec_bitrate;
+	}
+
+	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, &H264EncoderImpl::write_packet_thunk, nullptr);
+
+	Mux::Codec video_codec;
+	if (global_flags.uncompressed_video_to_http) {
+		video_codec = Mux::CODEC_NV12;
+	} else {
+		video_codec = Mux::CODEC_H264;
+	}
+
+	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+	AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+	if (codec_audio == nullptr) {
+		fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+		exit(1);
+	}
+
+	int time_base = global_flags.stream_coarse_timebase ? COARSE_TIMEBASE : TIMEBASE;
+	stream_mux_writing_header = true;
+	stream_mux.reset(new Mux(avctx, frame_width, frame_height, video_codec, codec_audio, time_base, bit_rate, this));
+	stream_mux_writing_header = false;
+	httpd->set_header(stream_mux_header);
+	stream_mux_header.clear();
+}
+
+void H264EncoderImpl::close_output_stream()
+{
+	stream_mux.reset();
+}
+
+int H264EncoderImpl::write_packet_thunk(void *opaque, uint8_t *buf, int buf_size)
+{
+	H264EncoderImpl *h264_encoder = (H264EncoderImpl *)opaque;
+	return h264_encoder->write_packet(buf, buf_size);
+}
+
+int H264EncoderImpl::write_packet(uint8_t *buf, int buf_size)
+{
+	if (stream_mux_writing_header) {
+		stream_mux_header.append((char *)buf, buf_size);
+	} else {
+		httpd->add_data((char *)buf, buf_size, stream_mux_writing_keyframes);
+		stream_mux_writing_keyframes = false;
+	}
+	return buf_size;
 }
 
 void H264EncoderImpl::encode_thread_func()
@@ -2006,30 +2259,108 @@ void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int g
 		encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
 		last_dts = dts;
 	}
+
+	if (global_flags.uncompressed_video_to_http ||
+	    global_flags.x264_video_to_http) {
+		// Add frames left in reorderer.
+		while (!reorderer->empty()) {
+			pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+			if (global_flags.uncompressed_video_to_http) {
+				add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+			} else {
+				assert(global_flags.x264_video_to_http);
+				x264_encoder->add_frame(output_frame.first, output_frame.second);
+			}
+		}
+	}
+}
+
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
+	AVPacket pkt;
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.buf = nullptr;
+	pkt.data = const_cast<uint8_t *>(data);
+	pkt.size = frame_width * frame_height * 2;
+	pkt.stream_index = 0;
+	pkt.flags = AV_PKT_FLAG_KEY;
+	stream_mux->add_packet(pkt, pts, pts);
+}
 
+namespace {
+
+void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height)
+{
+	if (src_width == dst_pitch) {
+		memcpy(dst, src, src_width * height);
+	} else {
+		for (size_t y = 0; y < height; ++y) {
+			const uint8_t *sptr = src + y * src_width;
+			uint8_t *dptr = dst + y * dst_pitch;
+			memcpy(dptr, sptr, src_width);
+		}
+	}
+}
+
+}  // namespace
+
 void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
                                    int frame_type, int64_t pts, int64_t dts)
 {
 	// Wait for the GPU to be done with the frame.
-	glClientWaitSync(frame.fence.get(), 0, 0);
+	GLenum sync_status;
+	do {
+		sync_status = glClientWaitSync(frame.fence.get(), 0, 1000000000);
+		check_error();
+	} while (sync_status == GL_TIMEOUT_EXPIRED);
+	assert(sync_status != GL_WAIT_FAILED);
 
 	// Release back any input frames we needed to render this frame.
 	frame.input_frames.clear();
 
-	// Unmap the image.
 	GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
-	eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
-	eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
-	VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
-	CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+	VAStatus va_status;
+
+	if (use_zerocopy) {
+		eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
+		eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
+		va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
+		CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+	} else {
+		unsigned char *surface_p = nullptr;
+		vaMapBuffer(va_dpy, surf->surface_image.buf, (void **)&surface_p);
+
+		unsigned char *va_y_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[0];
+		memcpy_with_pitch(va_y_ptr, surf->y_ptr, frame_width, surf->surface_image.pitches[0], frame_height);
+
+		unsigned char *va_cbcr_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[1];
+		memcpy_with_pitch(va_cbcr_ptr, surf->cbcr_ptr, (frame_width / 2) * sizeof(uint16_t), surf->surface_image.pitches[1], frame_height / 2);
+
+		va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
+		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+		if (global_flags.uncompressed_video_to_http ||
+		    global_flags.x264_video_to_http) {
+			// Add uncompressed video. (Note that pts == dts here.)
+			// Delay needs to match audio.
+			pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<const uint8_t *>(surf->y_ptr));
+			if (output_frame.second != nullptr) {
+				if (global_flags.uncompressed_video_to_http) {
+					add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+				} else {
+					assert(global_flags.x264_video_to_http);
+					x264_encoder->add_frame(output_frame.first, output_frame.second);
+				}
+			}
+		}
+	}
+
 	va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
 	CHECK_VASTATUS(va_status, "vaDestroyImage");
 
-	VASurfaceID surface = surf->src_surface;
-
 	// Schedule the frame for encoding.
-	va_status = vaBeginPicture(va_dpy, context_id, surface);
+	VASurfaceID va_surface = surf->src_surface;
+	va_status = vaBeginPicture(va_dpy, context_id, va_surface);
 	CHECK_VASTATUS(va_status, "vaBeginPicture");
 
 	if (frame_type == FRAME_IDR) {
@@ -2061,13 +2392,13 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int enco
 }
 
 // Proxy object.
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
-	: impl(new H264EncoderImpl(surface, width, height, httpd)) {}
+H264Encoder::H264Encoder(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+	: impl(new H264EncoderImpl(surface, va_display, width, height, httpd)) {}
 
 // Must be defined here because unique_ptr<> destructor needs to know the impl.
H264Encoder::~H264Encoder() {}
 
-void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+void H264Encoder::add_audio(int64_t pts, vector<float> audio)
 {
 	impl->add_audio(pts, audio);
 }
@@ -2077,9 +2408,22 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 	return impl->begin_frame(y_tex, cbcr_tex);
 }
 
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264Encoder::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
-	impl->end_frame(fence, pts, input_frames);
+	return impl->end_frame(pts, input_frames);
}
 
-// Real class.
+void H264Encoder::shutdown()
+{
+	impl->shutdown();
+}
+
+void H264Encoder::open_output_file(const std::string &filename)
+{
+	impl->open_output_file(filename);
+}
+
+void H264Encoder::close_output_file()
+{
+	impl->close_output_file();
+}
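For orientation, here is a hedged sketch of how a caller might drive the H264Encoder proxy defined above. The method names and construction signature come from this diff; surface, httpd, render_into(), audio_block, input_frames, and done are stand-ins for the caller's own state (in Nageru itself, the mixer plays this role), and TIMEBASE/MAX_FPS are the constants from timebase.h/defs.h.

	// Hypothetical driver loop, assuming one video frame plus matching audio per tick.
	H264Encoder encoder(surface, /*va_display=*/"", 1280, 720, &httpd);
	encoder.open_output_file("test.mp4");
	for (int64_t pts = 0; !done; pts += TIMEBASE / MAX_FPS) {
		GLuint y_tex, cbcr_tex;
		if (!encoder.begin_frame(&y_tex, &cbcr_tex)) break;  // Returns false on shutdown.
		render_into(y_tex, cbcr_tex);                        // Caller's OpenGL rendering.
		encoder.add_audio(pts, move(audio_block));           // Before end_frame() of same pts.
		RefCountedGLsync fence = encoder.end_frame(pts, input_frames);
	}
	encoder.close_output_file();
	encoder.shutdown();  // Also invoked by the destructor if omitted.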