Make it possible for file and HTTP streams to use different audio codecs.

diff --git a/h264encode.cpp b/h264encode.cpp
index 5fa01bd48d29ab4a2286ed88117544989961a9ef..8f4508835c0a2f3d57a02c5e5c52f258f73344c2 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -1,33 +1,49 @@
 //#include "sysdeps.h"
 #include "h264encode.h"
 
+#include <movit/util.h>
 #include <EGL/eglplatform.h>
 #include <X11/X.h>
 #include <X11/Xlib.h>
 #include <assert.h>
 #include <epoxy/egl.h>
+extern "C" {
 #include <libavcodec/avcodec.h>
-#include <libavformat/avio.h>
-#include <libavutil/mathematics.h>
+#include <libavformat/avformat.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/frame.h>
 #include <libavutil/rational.h>
+#include <libavutil/samplefmt.h>
+}
 #include <libdrm/drm_fourcc.h>
-#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <fcntl.h>
 #include <va/va.h>
+#include <va/va_drm.h>
 #include <va/va_drmcommon.h>
 #include <va/va_enc_h264.h>
 #include <va/va_x11.h>
+#include <algorithm>
 #include <condition_variable>
+#include <cstdint>
+#include <map>
+#include <memory>
 #include <mutex>
 #include <queue>
+#include <stack>
 #include <string>
 #include <thread>
+#include <utility>
+#include <vector>
 
 #include "context.h"
+#include "defs.h"
+#include "flags.h"
+#include "httpd.h"
 #include "timebase.h"
 
+using namespace std;
+
 class QOpenGLContext;
 class QSurface;
 
@@ -37,6 +53,8 @@ class QSurface;
         exit(1);                                                        \
     }
 
+#define BUFFER_OFFSET(i) ((char *)NULL + (i))  // Treat a byte offset as the pointer that GL buffer-offset APIs expect.
+
 //#include "loadsurface.h"
 
 #define NAL_REF_IDC_NONE        0
@@ -66,61 +84,15 @@ class QSurface;
 #define PROFILE_IDC_HIGH        100
    
 #define BITSTREAM_ALLOCATE_STEPPING     4096
-
 #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
-static  VADisplay va_dpy;
-static  VAProfile h264_profile = (VAProfile)~0;
-static  VAConfigAttrib config_attrib[VAConfigAttribTypeMax];
-static  int config_attrib_num = 0, enc_packed_header_idx;
-
-struct GLSurface {
-       VASurfaceID src_surface, ref_surface;
-       VABufferID coded_buf;
-
-       VAImage surface_image;
-       GLuint y_tex, cbcr_tex;
-       EGLImage y_egl_image, cbcr_egl_image;
-};
-GLSurface gl_surfaces[SURFACE_NUM];
-
-static  VAConfigID config_id;
-static  VAContextID context_id;
-static  VAEncSequenceParameterBufferH264 seq_param;
-static  VAEncPictureParameterBufferH264 pic_param;
-static  VAEncSliceParameterBufferH264 slice_param;
-static  VAPictureH264 CurrentCurrPic;
-static  VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
-
-static  unsigned int MaxFrameNum = (2<<16);
-static  unsigned int MaxPicOrderCntLsb = (2<<8);
-static  unsigned int Log2MaxFrameNum = 16;
-static  unsigned int Log2MaxPicOrderCntLsb = 8;
-
-static  unsigned int num_ref_frames = 2;
-static  unsigned int numShortTerm = 0;
-static  int constraint_set_flag = 0;
-static  int h264_packedheader = 0; /* support pack header? */
-static  int h264_maxref = (1<<16|1);
-static  int h264_entropy_mode = 1; /* cabac */
-
-static  char *coded_fn = NULL;
-
-static  int frame_width = 176;
-static  int frame_height = 144;
-static  int frame_width_mbaligned;
-static  int frame_height_mbaligned;
-static  int frame_rate = 60;
-static  unsigned int frame_bitrate = 0;
-static  unsigned int frame_slices = 1;
-static  double frame_size = 0;
-static  int initial_qp = 15;
-//static  int initial_qp = 28;
-static  int minimal_qp = 0;
-static  int intra_period = 30;
-static  int intra_idr_period = 60;
-static  int ip_period = 3;
-static  int rc_mode = -1;
-static  int rc_default_modes[] = {
+#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
+#define MAX_NUM_REF2 32 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
+
+static constexpr unsigned int MaxFrameNum = (2<<16);
+static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
+static constexpr unsigned int Log2MaxFrameNum = 16;
+static constexpr unsigned int Log2MaxPicOrderCntLsb = 8;
+static constexpr int rc_default_modes[] = {  // Priority list of modes.
     VA_RC_VBR,
     VA_RC_CQP,
     VA_RC_VBR_CONSTRAINED,
@@ -128,14 +100,6 @@ static  int rc_default_modes[] = {
     VA_RC_VCM,
     VA_RC_NONE,
 };
-static  unsigned long long current_frame_encoding = 0;
-static  unsigned long long current_frame_display = 0;
-static  unsigned long long current_IDR_display = 0;
-static  unsigned int current_frame_num = 0;
-static  int current_frame_type;
-
-static  int misc_priv_type = 0;
-static  int misc_priv_value = 0;
 
 /* thread to save coded data */
 #define SRC_SURFACE_FREE        0
@@ -150,6 +114,258 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
+//
+// The class also deals with keeping a freelist as needed.
+class FrameReorderer {
+public:
+       FrameReorderer(unsigned queue_length, int width, int height);
+
+	// Inserts the given frame, and returns the next frame to output in display order (with its pts), if any; otherwise (-1, nullptr).
+       // Does _not_ take ownership of data; a copy is taken if needed.
+       // The returned pointer is valid until the next call to reorder_frame, or destruction.
+       // As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
+       pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+       // The same as reorder_frame, but without inserting anything. Used to empty the queue.
+       pair<int64_t, const uint8_t *> get_first_frame();
+
+       bool empty() const { return frames.empty(); }
+
+private:
+       unsigned queue_length;
+       int width, height;
+
+       priority_queue<pair<int64_t, uint8_t *>> frames;
+       stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
+
+       // Owns all the pointers. Normally, freelist and frames could do this themselves,
+       // except priority_queue doesn't work well with movable-only types.
+       vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+    : queue_length(queue_length), width(width), height(height)
+{
+       for (unsigned i = 0; i < queue_length; ++i) {
+               owner.emplace_back(new uint8_t[width * height * 2]);
+               freelist.push(owner.back().get());
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+       if (queue_length == 0) {
+               return make_pair(pts, data);
+       }
+
+       assert(!freelist.empty());
+       uint8_t *storage = freelist.top();
+       freelist.pop();
+       memcpy(storage, data, width * height * 2);
+       frames.emplace(-pts, storage);  // Invert pts to get smallest first.
+
+       if (frames.size() >= queue_length) {
+               return get_first_frame();
+       } else {
+               return make_pair(-1, nullptr);
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+       assert(!frames.empty());
+       pair<int64_t, uint8_t *> storage = frames.top();
+       frames.pop();
+       int64_t pts = storage.first;
+       freelist.push(storage.second);
+       return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
+}
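
The reordering above is small enough to check standalone. A minimal sketch of the same idea (not Nageru code; the pts values and queue length are made up, with queue_length = 2 standing in for a two-B-frame setup): push frames in encoding order with inverted pts, pop once the queue is primed, and they come back out in presentation order.

#include <cstdint>
#include <cstdio>
#include <queue>
#include <utility>

int main()
{
    const unsigned queue_length = 2;  // made-up; enough for the two-B-frame pattern below
    std::priority_queue<std::pair<int64_t, int>> frames;  // (-pts, frame id)

    const int64_t encoding_order_pts[] = { 0, 3, 1, 2, 6, 4, 5 };
    for (int id = 0; id < 7; ++id) {
        frames.emplace(-encoding_order_pts[id], id);  // invert pts so the smallest comes out first
        if (frames.size() >= queue_length) {
            printf("output pts %lld\n", (long long)-frames.top().first);
            frames.pop();
        }
    }
    while (!frames.empty()) {  // drain, like get_first_frame()
        printf("output pts %lld\n", (long long)-frames.top().first);
        frames.pop();
    }
    // Prints pts 0 through 6, in order.
}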
+
+class H264EncoderImpl {
+public:
+       H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
+       ~H264EncoderImpl();
+       void add_audio(int64_t pts, vector<float> audio);
+       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
+       RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
+       void shutdown();
+       void open_output_file(const std::string &filename);
+       void close_output_file();
+
+private:
+       struct storage_task {
+               unsigned long long display_order;
+               int frame_type;
+               vector<float> audio;
+               int64_t pts, dts;
+       };
+       struct PendingFrame {
+               RefCountedGLsync fence;
+               vector<RefCountedFrame> input_frames;
+               int64_t pts;
+       };
+
+       // So we never get negative dts.
+       int64_t global_delay() const {
+               return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+       }
+
+       void encode_thread_func();
+       void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+       void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
+       void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
+                         int frame_type, int64_t pts, int64_t dts);
+       void storage_task_thread();
+       void encode_audio(const vector<float> &audio,
+                         vector<float> *audio_queue,
+                         int64_t audio_pts,
+                         AVCodecContext *ctx,
+                         const vector<PacketDestination *> &destinations);
+       void encode_audio_one_frame(const float *audio,
+                                   size_t num_samples,  // In each channel.
+                                   int64_t audio_pts,
+                                   AVCodecContext *ctx,
+                                   const vector<PacketDestination *> &destinations);
+       void storage_task_enqueue(storage_task task);
+       void save_codeddata(storage_task task);
+       int render_packedsequence();
+       int render_packedpicture();
+       void render_packedslice();
+       int render_sequence();
+       int render_picture(int frame_type, int display_frame_num, int gop_start_display_frame_num);
+       void sps_rbsp(bitstream *bs);
+       void pps_rbsp(bitstream *bs);
+       int build_packed_pic_buffer(unsigned char **header_buffer);
+       int render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type);
+       void slice_header(bitstream *bs);
+       int build_packed_seq_buffer(unsigned char **header_buffer);
+       int build_packed_slice_buffer(unsigned char **header_buffer);
+       int init_va(const string &va_display);
+       int deinit_va();
+       void enable_zerocopy_if_possible();
+       VADisplay va_open_display(const string &va_display);
+       void va_close_display(VADisplay va_dpy);
+       int setup_encode();
+       int release_encode();
+       void update_ReferenceFrames(int frame_type);
+       int update_RefPicList(int frame_type);
+
+       bool is_shutdown = false;
+       bool use_zerocopy;
+       int drm_fd = -1;
+
+       thread encode_thread, storage_thread;
+
+       mutex storage_task_queue_mutex;
+       condition_variable storage_task_queue_changed;
+       int srcsurface_status[SURFACE_NUM];  // protected by storage_task_queue_mutex
+       queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
+       bool storage_thread_should_quit = false;  // protected by storage_task_queue_mutex
+
+       mutex frame_queue_mutex;
+       condition_variable frame_queue_nonempty;
+       bool encode_thread_should_quit = false;  // under frame_queue_mutex
+
+       int current_storage_frame;
+
+       map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
+       map<int64_t, vector<float>> pending_audio_frames;  // under frame_queue_mutex
+       QSurface *surface;
+
+       AVCodecContext *context_audio_file;
+       AVCodecContext *context_audio_stream = nullptr;  // nullptr = don't code separate audio for stream.
+
+       vector<float> audio_queue_file;
+       vector<float> audio_queue_stream;
+
+       AVFrame *audio_frame = nullptr;
+       HTTPD *httpd;
+       unique_ptr<FrameReorderer> reorderer;
+
+       Display *x11_display = nullptr;
+
+       // Encoder parameters
+       VADisplay va_dpy;
+       VAProfile h264_profile = (VAProfile)~0;
+       VAConfigAttrib config_attrib[VAConfigAttribTypeMax];
+       int config_attrib_num = 0, enc_packed_header_idx;
+
+       struct GLSurface {
+               VASurfaceID src_surface, ref_surface;
+               VABufferID coded_buf;
+
+               VAImage surface_image;
+               GLuint y_tex, cbcr_tex;
+
+               // Only if use_zerocopy == true.
+               EGLImage y_egl_image, cbcr_egl_image;
+
+               // Only if use_zerocopy == false.
+               GLuint pbo;
+               uint8_t *y_ptr, *cbcr_ptr;
+               size_t y_offset, cbcr_offset;
+       };
+       GLSurface gl_surfaces[SURFACE_NUM];
+
+       VAConfigID config_id;
+       VAContextID context_id;
+       VAEncSequenceParameterBufferH264 seq_param;
+       VAEncPictureParameterBufferH264 pic_param;
+       VAEncSliceParameterBufferH264 slice_param;
+       VAPictureH264 CurrentCurrPic;
+       VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
+
+       // Static quality settings.
+       static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
+       static constexpr unsigned int num_ref_frames = 2;
+       static constexpr int initial_qp = 15;
+       static constexpr int minimal_qp = 0;
+       static constexpr int intra_period = 30;
+       static constexpr int intra_idr_period = MAX_FPS;  // About a second; more at lower frame rates. Not ideal.
+
+       // Quality settings that are meant to be static, but might be overridden
+       // by the profile.
+       int constraint_set_flag = 0;
+	int h264_packedheader = 0; /* supports packed headers? */
+       int h264_maxref = (1<<16|1);
+       int h264_entropy_mode = 1; /* cabac */
+       int ip_period = 3;
+
+       int rc_mode = -1;
+       unsigned int current_frame_num = 0;
+       unsigned int numShortTerm = 0;
+
+       int frame_width;
+       int frame_height;
+       int frame_width_mbaligned;
+       int frame_height_mbaligned;
+
+       unique_ptr<Mux> file_mux;  // To local disk.
+};
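
Taken together, the members above sketch the encoder's pipeline: begin_frame()/end_frame() run on the caller's thread and queue PendingFrames under frame_queue_mutex; encode_thread picks them up and runs the VA-API encode, enqueueing storage_tasks; storage_thread drains storage_task_queue, maps the coded buffers, and hands the packets to file_mux and/or httpd, interleaving pending audio as it goes (see save_codeddata() further down).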
+
+// Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
+// but if we don't delete it here, we get leaks. The GStreamer implementation
+// does the same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+    VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+    for (int i = 0; i < num_buffers; ++i) {
+        va_status = vaDestroyBuffer(dpy, buffers[i]);
+        CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+    }
+}
+
 static unsigned int 
 va_swap32(unsigned int val)
 {
@@ -197,7 +413,11 @@ bitstream_put_ui(bitstream *bs, unsigned int val, int size_in_bits)
         bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val);
     } else {
         size_in_bits -= bit_left;
-        bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+        if (bit_left >= 32) {
+            bs->buffer[pos] = (val >> size_in_bits);
+        } else {
+            bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+        }
         bs->buffer[pos] = va_swap32(bs->buffer[pos]);
 
         if (pos + 1 == bs->max_size_in_dword) {
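
The new bit_left >= 32 guard exists because shifting a 32-bit value by 32 is undefined behavior in C++, and bit_left is exactly 32 whenever a write starts on a fresh word. A standalone sketch of the fixed store step (illustrative, not Nageru code):

#include <cstdint>
#include <cstdio>

// Mirrors the else-branch above: spill the high bits of val into the current word.
static uint32_t store_word(uint32_t word, int bit_left, uint32_t val, int size_in_bits)
{
    size_in_bits -= bit_left;
    if (bit_left >= 32) {
        // "word << 32" would be UB; the old word is fully replaced anyway.
        return val >> size_in_bits;
    } else {
        return (word << bit_left) | (val >> size_in_bits);
    }
}

int main()
{
    // A full 32-bit write at a word-aligned offset (bit_left == 32):
    printf("0x%08x\n", store_word(0, 32, 0xdeadbeefu, 32));       // 0xdeadbeef
    // A 16-bit write with only 8 bits left in the current word:
    printf("0x%08x\n", store_word(0x00ffffffu, 8, 0xabcdu, 16));  // 0xffffffab
}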
@@ -276,7 +496,7 @@ static void nal_header(bitstream *bs, int nal_ref_idc, int nal_unit_type)
     bitstream_put_ui(bs, nal_unit_type, 5);
 }
 
-static void sps_rbsp(bitstream *bs)
+void H264EncoderImpl::sps_rbsp(bitstream *bs)
 {
     int profile_idc = PROFILE_IDC_BASELINE;
 
@@ -339,7 +559,17 @@ static void sps_rbsp(bitstream *bs)
         bitstream_put_ui(bs, 1, 1); /* vui_parameters_present_flag */
         bitstream_put_ui(bs, 0, 1); /* aspect_ratio_info_present_flag */
         bitstream_put_ui(bs, 0, 1); /* overscan_info_present_flag */
-        bitstream_put_ui(bs, 0, 1); /* video_signal_type_present_flag */
+        bitstream_put_ui(bs, 1, 1); /* video_signal_type_present_flag */
+        {
+            bitstream_put_ui(bs, 5, 3);  /* video_format (5 = Unspecified) */
+            bitstream_put_ui(bs, 0, 1);  /* video_full_range_flag */
+            bitstream_put_ui(bs, 1, 1);  /* colour_description_present_flag */
+            {
+                bitstream_put_ui(bs, 1, 8);  /* colour_primaries (1 = BT.709) */
+                bitstream_put_ui(bs, 2, 8);  /* transfer_characteristics (2 = unspecified, since we use sRGB) */
+                bitstream_put_ui(bs, 6, 8);  /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
+            }
+        }
         bitstream_put_ui(bs, 0, 1); /* chroma_loc_info_present_flag */
         bitstream_put_ui(bs, 1, 1); /* timing_info_present_flag */
         {
@@ -374,7 +604,7 @@ static void sps_rbsp(bitstream *bs)
 }
 
 
-static void pps_rbsp(bitstream *bs)
+void H264EncoderImpl::pps_rbsp(bitstream *bs)
 {
     bitstream_put_ue(bs, pic_param.pic_parameter_set_id);      /* pic_parameter_set_id */
     bitstream_put_ue(bs, pic_param.seq_parameter_set_id);      /* seq_parameter_set_id */
@@ -407,7 +637,7 @@ static void pps_rbsp(bitstream *bs)
     rbsp_trailing_bits(bs);
 }
 
-static void slice_header(bitstream *bs)
+void H264EncoderImpl::slice_header(bitstream *bs)
 {
     int first_mb_in_slice = slice_param.macroblock_address;
 
@@ -502,8 +732,7 @@ static void slice_header(bitstream *bs)
     }
 }
 
-static int
-build_packed_pic_buffer(unsigned char **header_buffer)
+int H264EncoderImpl::build_packed_pic_buffer(unsigned char **header_buffer)
 {
     bitstream bs;
 
@@ -517,8 +746,8 @@ build_packed_pic_buffer(unsigned char **header_buffer)
     return bs.bit_offset;
 }
 
-static int
-build_packed_seq_buffer(unsigned char **header_buffer)
+int
+H264EncoderImpl::build_packed_seq_buffer(unsigned char **header_buffer)
 {
     bitstream bs;
 
@@ -532,7 +761,7 @@ build_packed_seq_buffer(unsigned char **header_buffer)
     return bs.bit_offset;
 }
 
-static int build_packed_slice_buffer(unsigned char **header_buffer)
+int H264EncoderImpl::build_packed_slice_buffer(unsigned char **header_buffer)
 {
     bitstream bs;
     int is_idr = !!pic_param.pic_fields.bits.idr_pic_flag;
@@ -590,7 +819,7 @@ static int build_packed_slice_buffer(unsigned char **header_buffer)
 //
 // Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
 // bit tricky. We assume first of all that the frame rate never goes _above_
-// <frame_rate>, which gives us a frame period N. The decoder can always decode
+// MAX_FPS, which gives us a frame period N. The decoder can always decode
 // in at least this speed, as long as dts <= pts (so that no frame is
 // presented before it is decoded). Furthermore, we never have longer chains of
 // B-frames than a fixed constant C. (In a B-frame chain, we say that the base
@@ -636,9 +865,9 @@ static int build_packed_slice_buffer(unsigned char **header_buffer)
 #define FRAME_I 2
 #define FRAME_IDR 7
 void encoding2display_order(
-    unsigned long long encoding_order, int intra_period,
+    int encoding_order, int intra_period,
     int intra_idr_period, int ip_period,
-    unsigned long long *displaying_order,
+    int *displaying_order,
     int *frame_type, int *pts_lag)
 {
     int encoding_order_gop = 0;
@@ -683,7 +912,7 @@ void encoding2display_order(
         *displaying_order = encoding_order;
         // IDR frames are a special case; I honestly can't find the logic behind
         // why this is the right thing, but it seems to line up nicely in practice :-)
-        *pts_lag = TIMEBASE / frame_rate;
+        *pts_lag = TIMEBASE / MAX_FPS;
     } else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
         *frame_type = FRAME_B;
         *displaying_order = encoding_order - 1;
@@ -702,7 +931,7 @@ void encoding2display_order(
 }
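
To make the pts/dts bookkeeping concrete, a small sketch (illustrative numbers only: N stands in for TIMEBASE / MAX_FPS, and the pattern assumes ip_period == 3, i.e. two B-frames). dts advances one frame period per frame in encoding order; without global_delay(), the first two dts values would be -2N and -N.

#include <cassert>
#include <cstdio>

int main()
{
    const long long N = 100;  // stand-in for TIMEBASE / MAX_FPS
    const int ip_period = 3;
    const long long global_delay = (ip_period - 1) * N;  // as in global_delay()

    // Encoding order with two B-frames: I0 P3 B1 B2 P6 B4 B5 (numbers = display order).
    const int display_order[] = { 0, 3, 1, 2, 6, 4, 5 };

    for (int k = 0; k < 7; ++k) {
        long long pts = display_order[k] * N + global_delay;
        long long dts = k * N;  // would start at -2N without the delay
        assert(dts <= pts);     // a frame is never presented before it is decoded
        printf("frame %d: pts %lld, dts %lld\n", display_order[k], pts, dts);
    }
}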
 
 
-static const char *rc_to_string(int rcmode)
+static const char *rc_to_string(int rc_mode)
 {
     switch (rc_mode) {
     case VA_RC_NONE:
@@ -722,185 +951,57 @@ static const char *rc_to_string(int rcmode)
     }
 }
 
-#if 0
-static int process_cmdline(int argc, char *argv[])
+void H264EncoderImpl::enable_zerocopy_if_possible()
 {
-    char c;
-    const struct option long_opts[] = {
-        {"help", no_argument, NULL, 0 },
-        {"bitrate", required_argument, NULL, 1 },
-        {"minqp", required_argument, NULL, 2 },
-        {"initialqp", required_argument, NULL, 3 },
-        {"intra_period", required_argument, NULL, 4 },
-        {"idr_period", required_argument, NULL, 5 },
-        {"ip_period", required_argument, NULL, 6 },
-        {"rcmode", required_argument, NULL, 7 },
-        {"srcyuv", required_argument, NULL, 9 },
-        {"recyuv", required_argument, NULL, 10 },
-        {"fourcc", required_argument, NULL, 11 },
-        {"syncmode", no_argument, NULL, 12 },
-        {"enablePSNR", no_argument, NULL, 13 },
-        {"prit", required_argument, NULL, 14 },
-        {"priv", required_argument, NULL, 15 },
-        {"framecount", required_argument, NULL, 16 },
-        {"entropy", required_argument, NULL, 17 },
-        {"profile", required_argument, NULL, 18 },
-        {NULL, no_argument, NULL, 0 }};
-    int long_index;
-    
-    while ((c =getopt_long_only(argc, argv, "w:h:n:f:o:?", long_opts, &long_index)) != EOF) {
-        switch (c) {
-        case 'w':
-            frame_width = atoi(optarg);
-            break;
-        case 'h':
-            frame_height = atoi(optarg);
-            break;
-        case 'n':
-        case 'f':
-            frame_rate = atoi(optarg);
-            break;
-        case 'o':
-            coded_fn = strdup(optarg);
-            break;
-        case 0:
-            print_help();
-            exit(0);
-        case 1:
-            frame_bitrate = atoi(optarg);
-            break;
-        case 2:
-            minimal_qp = atoi(optarg);
-            break;
-        case 3:
-            initial_qp = atoi(optarg);
-            break;
-        case 4:
-            intra_period = atoi(optarg);
-            break;
-        case 5:
-            intra_idr_period = atoi(optarg);
-            break;
-        case 6:
-            ip_period = atoi(optarg);
-            break;
-        case 7:
-            rc_mode = string_to_rc(optarg);
-            if (rc_mode < 0) {
-                print_help();
-                exit(1);
-            }
-            break;
-        case 9:
-            srcyuv_fn = strdup(optarg);
-            break;
-        case 11:
-            srcyuv_fourcc = string_to_fourcc(optarg);
-            if (srcyuv_fourcc <= 0) {
-                print_help();
-                exit(1);
-            }
-            break;
-        case 13:
-            calc_psnr = 1;
-            break;
-        case 14:
-            misc_priv_type = strtol(optarg, NULL, 0);
-            break;
-        case 15:
-            misc_priv_value = strtol(optarg, NULL, 0);
-            break;
-        case 17:
-            h264_entropy_mode = atoi(optarg) ? 1: 0;
-            break;
-        case 18:
-            if (strncmp(optarg, "BP", 2) == 0)
-                h264_profile = VAProfileH264Baseline;
-            else if (strncmp(optarg, "MP", 2) == 0)
-                h264_profile = VAProfileH264Main;
-            else if (strncmp(optarg, "HP", 2) == 0)
-                h264_profile = VAProfileH264High;
-            else
-                h264_profile = (VAProfile)0;
-            break;
-        case ':':
-        case '?':
-            print_help();
-            exit(0);
-        }
-    }
-
-    if (ip_period < 1) {
-       printf(" ip_period must be greater than 0\n");
-        exit(0);
-    }
-    if (intra_period != 1 && intra_period % ip_period != 0) {
-       printf(" intra_period must be a multiplier of ip_period\n");
-        exit(0);        
-    }
-    if (intra_period != 0 && intra_idr_period % intra_period != 0) {
-       printf(" intra_idr_period must be a multiplier of intra_period\n");
-        exit(0);        
-    }
-
-    if (frame_bitrate == 0)
-        frame_bitrate = frame_width * frame_height * 12 * frame_rate / 50;
-        
-    if (coded_fn == NULL) {
-        struct stat buf;
-        if (stat("/tmp", &buf) == 0)
-            coded_fn = strdup("/tmp/test.264");
-        else if (stat("/sdcard", &buf) == 0)
-            coded_fn = strdup("/sdcard/test.264");
-        else
-            coded_fn = strdup("./test.264");
-    }
-    
-
-    frame_width_mbaligned = (frame_width + 15) & (~15);
-    frame_height_mbaligned = (frame_height + 15) & (~15);
-    if (frame_width != frame_width_mbaligned ||
-        frame_height != frame_height_mbaligned) {
-        printf("Source frame is %dx%d and will code clip to %dx%d with crop\n",
-               frame_width, frame_height,
-               frame_width_mbaligned, frame_height_mbaligned
-               );
-    }
-    
-    return 0;
+       if (global_flags.uncompressed_video_to_http) {
+               fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+               use_zerocopy = false;
+       } else {
+               use_zerocopy = true;
+       }
 }
-#endif
-
-static Display *x11_display;
-static Window   x11_window;
 
-VADisplay
-va_open_display(void)
+VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 {
-    x11_display = XOpenDisplay(NULL);
-    if (!x11_display) {
-        fprintf(stderr, "error: can't connect to X server!\n");
-        return NULL;
-    }
-    return vaGetDisplay(x11_display);
+       if (va_display.empty()) {
+               x11_display = XOpenDisplay(NULL);
+               if (!x11_display) {
+                       fprintf(stderr, "error: can't connect to X server!\n");
+                       return NULL;
+               }
+               enable_zerocopy_if_possible();
+               return vaGetDisplay(x11_display);
+       } else if (va_display[0] != '/') {
+               x11_display = XOpenDisplay(va_display.c_str());
+               if (!x11_display) {
+                       fprintf(stderr, "error: can't connect to X server!\n");
+                       return NULL;
+               }
+               enable_zerocopy_if_possible();
+               return vaGetDisplay(x11_display);
+       } else {
+               drm_fd = open(va_display.c_str(), O_RDWR);
+               if (drm_fd == -1) {
+                       perror(va_display.c_str());
+                       return NULL;
+               }
+               use_zerocopy = false;
+               return vaGetDisplayDRM(drm_fd);
+       }
 }
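
For reference, the three paths above correspond to the three accepted forms of --va-display: an empty string picks the default X11 display (zerocopy if possible), a string not starting with '/' names a specific X11 display (such as ":1"), and an absolute path such as /dev/dri/renderD128 opens a DRM render node directly, which disables zerocopy.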
 
-void
-va_close_display(VADisplay va_dpy)
+void H264EncoderImpl::va_close_display(VADisplay va_dpy)
 {
-    if (!x11_display)
-        return;
-
-    if (x11_window) {
-        XUnmapWindow(x11_display, x11_window);
-        XDestroyWindow(x11_display, x11_window);
-        x11_window = None;
-    }
-    XCloseDisplay(x11_display);
-    x11_display = NULL;
+       if (x11_display) {
+               XCloseDisplay(x11_display);
+               x11_display = nullptr;
+       }
+       if (drm_fd != -1) {
+               close(drm_fd);
+       }
 }
 
-static int init_va(void)
+int H264EncoderImpl::init_va(const string &va_display)
 {
     VAProfile profile_list[]={VAProfileH264High, VAProfileH264Main, VAProfileH264Baseline, VAProfileH264ConstrainedBaseline};
     VAEntrypoint *entrypoints;
@@ -910,7 +1011,7 @@ static int init_va(void)
     VAStatus va_status;
     unsigned int i;
 
-    va_dpy = va_open_display();
+    va_dpy = va_open_display(va_display);
     va_status = vaInitialize(va_dpy, &major_ver, &minor_ver);
     CHECK_VASTATUS(va_status, "vaInitialize");
 
@@ -939,33 +1040,30 @@ static int init_va(void)
     }
     
     if (support_encode == 0) {
-        printf("Can't find VAEntrypointEncSlice for H264 profiles\n");
+        printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
+        printf("but have an Intel GPU in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
+        printf("to use VA-API against DRM instead of X11.\n");
         exit(1);
     } else {
         switch (h264_profile) {
             case VAProfileH264Baseline:
-                printf("Use profile VAProfileH264Baseline\n");
                 ip_period = 1;
                 constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
                 h264_entropy_mode = 0;
                 break;
             case VAProfileH264ConstrainedBaseline:
-                printf("Use profile VAProfileH264ConstrainedBaseline\n");
                 constraint_set_flag |= (1 << 0 | 1 << 1); /* Annex A.2.2 */
                 ip_period = 1;
                 break;
 
             case VAProfileH264Main:
-                printf("Use profile VAProfileH264Main\n");
                 constraint_set_flag |= (1 << 1); /* Annex A.2.2 */
                 break;
 
             case VAProfileH264High:
                 constraint_set_flag |= (1 << 3); /* Annex A.2.4 */
-                printf("Use profile VAProfileH264High\n");
                 break;
             default:
-                printf("unknow profile. Set to Baseline");
                 h264_profile = VAProfileH264Baseline;
                 ip_period = 1;
                 constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
@@ -995,23 +1093,6 @@ static int init_va(void)
     if (attrib[VAConfigAttribRateControl].value != VA_ATTRIB_NOT_SUPPORTED) {
         int tmp = attrib[VAConfigAttribRateControl].value;
 
-        printf("Support rate control mode (0x%x):", tmp);
-        
-        if (tmp & VA_RC_NONE)
-            printf("NONE ");
-        if (tmp & VA_RC_CBR)
-            printf("CBR ");
-        if (tmp & VA_RC_VBR)
-            printf("VBR ");
-        if (tmp & VA_RC_VCM)
-            printf("VCM ");
-        if (tmp & VA_RC_CQP)
-            printf("CQP ");
-        if (tmp & VA_RC_VBR_CONSTRAINED)
-            printf("VBR_CONSTRAINED ");
-
-        printf("\n");
-
         if (rc_mode == -1 || !(rc_mode & tmp))  {
             if (rc_mode != -1) {
                 printf("Warning: Don't support the specified RateControl mode: %s!!!, switch to ", rc_to_string(rc_mode));
@@ -1023,8 +1104,6 @@ static int init_va(void)
                     break;
                 }
             }
-
-            printf("RateControl mode: %s\n", rc_to_string(rc_mode));
         }
 
         config_attrib[config_attrib_num].type = VAConfigAttribRateControl;
@@ -1036,29 +1115,23 @@ static int init_va(void)
     if (attrib[VAConfigAttribEncPackedHeaders].value != VA_ATTRIB_NOT_SUPPORTED) {
         int tmp = attrib[VAConfigAttribEncPackedHeaders].value;
 
-        printf("Support VAConfigAttribEncPackedHeaders\n");
-        
         h264_packedheader = 1;
         config_attrib[config_attrib_num].type = VAConfigAttribEncPackedHeaders;
         config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
         
         if (tmp & VA_ENC_PACKED_HEADER_SEQUENCE) {
-            printf("Support packed sequence headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SEQUENCE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_PICTURE) {
-            printf("Support packed picture headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_PICTURE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_SLICE) {
-            printf("Support packed slice headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SLICE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_MISC) {
-            printf("Support packed misc headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_MISC;
         }
         
@@ -1067,19 +1140,6 @@ static int init_va(void)
     }
 
     if (attrib[VAConfigAttribEncInterlaced].value != VA_ATTRIB_NOT_SUPPORTED) {
-        int tmp = attrib[VAConfigAttribEncInterlaced].value;
-        
-        printf("Support VAConfigAttribEncInterlaced\n");
-
-        if (tmp & VA_ENC_INTERLACED_FRAME)
-            printf("support VA_ENC_INTERLACED_FRAME\n");
-        if (tmp & VA_ENC_INTERLACED_FIELD)
-            printf("Support VA_ENC_INTERLACED_FIELD\n");
-        if (tmp & VA_ENC_INTERLACED_MBAFF)
-            printf("Support VA_ENC_INTERLACED_MBAFF\n");
-        if (tmp & VA_ENC_INTERLACED_PAFF)
-            printf("Support VA_ENC_INTERLACED_PAFF\n");
-        
         config_attrib[config_attrib_num].type = VAConfigAttribEncInterlaced;
         config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
         config_attrib_num++;
@@ -1087,35 +1147,13 @@ static int init_va(void)
     
     if (attrib[VAConfigAttribEncMaxRefFrames].value != VA_ATTRIB_NOT_SUPPORTED) {
         h264_maxref = attrib[VAConfigAttribEncMaxRefFrames].value;
-        
-        printf("Support %d RefPicList0 and %d RefPicList1\n",
-               h264_maxref & 0xffff, (h264_maxref >> 16) & 0xffff );
-    }
-
-    if (attrib[VAConfigAttribEncMaxSlices].value != VA_ATTRIB_NOT_SUPPORTED)
-        printf("Support %d slices\n", attrib[VAConfigAttribEncMaxSlices].value);
-
-    if (attrib[VAConfigAttribEncSliceStructure].value != VA_ATTRIB_NOT_SUPPORTED) {
-        int tmp = attrib[VAConfigAttribEncSliceStructure].value;
-        
-        printf("Support VAConfigAttribEncSliceStructure\n");
-
-        if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS\n");
-        if (tmp & VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS\n");
-        if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS\n");
-    }
-    if (attrib[VAConfigAttribEncMacroblockInfo].value != VA_ATTRIB_NOT_SUPPORTED) {
-        printf("Support VAConfigAttribEncMacroblockInfo\n");
     }
 
     free(entrypoints);
     return 0;
 }
 
-static int setup_encode()
+int H264EncoderImpl::setup_encode()
 {
     VAStatus va_status;
     VASurfaceID *tmp_surfaceid;
@@ -1174,6 +1212,28 @@ static int setup_encode()
     for (i = 0; i < SURFACE_NUM; i++) {
         glGenTextures(1, &gl_surfaces[i].y_tex);
         glGenTextures(1, &gl_surfaces[i].cbcr_tex);
+
+        if (!use_zerocopy) {
+            // Create Y image.
+            glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].y_tex);
+            glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, frame_width, frame_height);
+
+            // Create CbCr image.
+            glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].cbcr_tex);
+            glTexStorage2D(GL_TEXTURE_2D, 1, GL_RG8, frame_width / 2, frame_height / 2);
+
+            // Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
+            // buffers, due to potentially differing pitch.
+            glGenBuffers(1, &gl_surfaces[i].pbo);
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+            glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+            uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+            gl_surfaces[i].y_offset = 0;
+            gl_surfaces[i].cbcr_offset = frame_width * frame_height;
+            gl_surfaces[i].y_ptr = ptr + gl_surfaces[i].y_offset;
+            gl_surfaces[i].cbcr_ptr = ptr + gl_surfaces[i].cbcr_offset;
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+        }
     }
 
     for (i = 0; i < SURFACE_NUM; i++) {
@@ -1184,76 +1244,23 @@ static int setup_encode()
     return 0;
 }
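
A note on the non-zerocopy path above: since the PBO is created with GL_MAP_PERSISTENT_BIT and mapped exactly once, the per-frame readback (which is outside this diff) presumably only needs to bind the PBO, issue an asynchronous glReadPixels with BUFFER_OFFSET(y_offset) / BUFFER_OFFSET(cbcr_offset) as the destination, and later copy from y_ptr / cbcr_ptr once a fence signals that the transfer is complete; the mapping itself never has to be redone.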
 
-
-
-#define partition(ref, field, key, ascending)   \
-    while (i <= j) {                            \
-        if (ascending) {                        \
-            while (ref[i].field < key)          \
-                i++;                            \
-            while (ref[j].field > key)          \
-                j--;                            \
-        } else {                                \
-            while (ref[i].field > key)          \
-                i++;                            \
-            while (ref[j].field < key)          \
-                j--;                            \
-        }                                       \
-        if (i <= j) {                           \
-            tmp = ref[i];                       \
-            ref[i] = ref[j];                    \
-            ref[j] = tmp;                       \
-            i++;                                \
-            j--;                                \
-        }                                       \
-    }                                           \
-
-static void sort_one(VAPictureH264 ref[], int left, int right,
-                     int ascending, int frame_idx)
-{
-    int i = left, j = right;
-    unsigned int key;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        key = ref[(left + right) / 2].frame_idx;
-        partition(ref, frame_idx, key, ascending);
-    } else {
-        key = ref[(left + right) / 2].TopFieldOrderCnt;
-        partition(ref, TopFieldOrderCnt, (signed int)key, ascending);
-    }
-    
-    /* recursion */
-    if (left < j)
-        sort_one(ref, left, j, ascending, frame_idx);
-    
-    if (i < right)
-        sort_one(ref, i, right, ascending, frame_idx);
-}
-
-static void sort_two(VAPictureH264 ref[], int left, int right, unsigned int key, unsigned int frame_idx,
-                     int partition_ascending, int list0_ascending, int list1_ascending)
+// Given a list like 1 9 3 0 2 8 4 and a pivot element 3, will produce
+//
+//   2 1 0 [3] 4 8 9
+template<class T, class C>
+static void sort_two(T *begin, T *end, const T &pivot, const C &less_than)
 {
-    int i = left, j = right;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        partition(ref, frame_idx, key, partition_ascending);
-    } else {
-        partition(ref, TopFieldOrderCnt, (signed int)key, partition_ascending);
-    }
-    
-
-    sort_one(ref, left, i-1, list0_ascending, frame_idx);
-    sort_one(ref, j+1, right, list1_ascending, frame_idx);
+       T *middle = partition(begin, end, [&](const T &elem) { return less_than(elem, pivot); });
+       sort(begin, middle, [&](const T &a, const T &b) { return less_than(b, a); });
+       sort(middle, end, less_than);
 }
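
The comment's example is easy to verify with a throwaway check (not part of the patch; sort_two is copied verbatim so the snippet is self-contained):

#include <algorithm>
#include <cstdio>

template<class T, class C>
static void sort_two(T *begin, T *end, const T &pivot, const C &less_than)
{
    T *middle = std::partition(begin, end, [&](const T &elem) { return less_than(elem, pivot); });
    std::sort(begin, middle, [&](const T &a, const T &b) { return less_than(b, a); });
    std::sort(middle, end, less_than);
}

int main()
{
    int v[] = { 1, 9, 3, 0, 2, 8, 4 };
    sort_two(v, v + 7, 3, [](int a, int b) { return a < b; });
    for (int x : v)
        printf("%d ", x);  // 2 1 0 3 4 8 9
    printf("\n");
}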
 
-static int update_ReferenceFrames(void)
+void H264EncoderImpl::update_ReferenceFrames(int frame_type)
 {
     int i;
     
-    if (current_frame_type == FRAME_B)
-        return 0;
+    if (frame_type == FRAME_B)
+        return;
 
     CurrentCurrPic.flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
     numShortTerm++;
@@ -1263,43 +1270,44 @@ static int update_ReferenceFrames(void)
         ReferenceFrames[i] = ReferenceFrames[i-1];
     ReferenceFrames[0] = CurrentCurrPic;
     
-    if (current_frame_type != FRAME_B)
-        current_frame_num++;
+    current_frame_num++;
     if (current_frame_num > MaxFrameNum)
         current_frame_num = 0;
-    
-    return 0;
 }
 
 
-static int update_RefPicList(void)
+int H264EncoderImpl::update_RefPicList(int frame_type)
 {
-    unsigned int current_poc = CurrentCurrPic.TopFieldOrderCnt;
+    const auto descending_by_frame_idx = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.frame_idx > b.frame_idx;
+    };
+    const auto ascending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.TopFieldOrderCnt < b.TopFieldOrderCnt;
+    };
+    const auto descending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.TopFieldOrderCnt > b.TopFieldOrderCnt;
+    };
     
-    if (current_frame_type == FRAME_P) {
+    if (frame_type == FRAME_P) {
         memcpy(RefPicList0_P, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_one(RefPicList0_P, 0, numShortTerm-1, 0, 1);
-    }
-    
-    if (current_frame_type == FRAME_B) {
+        sort(&RefPicList0_P[0], &RefPicList0_P[numShortTerm], descending_by_frame_idx);
+    } else if (frame_type == FRAME_B) {
         memcpy(RefPicList0_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_two(RefPicList0_B, 0, numShortTerm-1, current_poc, 0,
-                 1, 0, 1);
+        sort_two(&RefPicList0_B[0], &RefPicList0_B[numShortTerm], CurrentCurrPic, ascending_by_top_field_order_cnt);
 
         memcpy(RefPicList1_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_two(RefPicList1_B, 0, numShortTerm-1, current_poc, 0,
-                 0, 1, 0);
+        sort_two(&RefPicList1_B[0], &RefPicList1_B[numShortTerm], CurrentCurrPic, descending_by_top_field_order_cnt);
     }
     
     return 0;
 }
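
This reproduces the H.264 default reference list order: for P frames, RefPicList0 holds the short-term references most-recent-first (descending frame_idx); for B frames, sort_two() splits the references around the current picture's POC, so RefPicList0 starts with the closest past frames (descending POC) followed by future frames in ascending order, and RefPicList1 is the mirror image, starting with the closest future frames.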
 
 
-static int render_sequence(void)
+int H264EncoderImpl::render_sequence()
 {
-    VABufferID seq_param_buf, rc_param_buf, misc_param_tmpbuf, render_id[2];
+    VABufferID seq_param_buf, rc_param_buf, render_id[2];
     VAStatus va_status;
-    VAEncMiscParameterBuffer *misc_param, *misc_param_tmp;
+    VAEncMiscParameterBuffer *misc_param;
     VAEncMiscParameterRateControl *misc_rate_ctrl;
     
     seq_param.level_idc = 41 /* Level 4.1 */;
@@ -1356,33 +1364,18 @@ static int render_sequence(void)
     render_id[0] = seq_param_buf;
     render_id[1] = rc_param_buf;
     
-    va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");;
-
-    if (misc_priv_type != 0) {
-        va_status = vaCreateBuffer(va_dpy, context_id,
-                                   VAEncMiscParameterBufferType,
-                                   sizeof(VAEncMiscParameterBuffer),
-                                   1, NULL, &misc_param_tmpbuf);
-        CHECK_VASTATUS(va_status, "vaCreateBuffer");
-        vaMapBuffer(va_dpy, misc_param_tmpbuf, (void **)&misc_param_tmp);
-        misc_param_tmp->type = (VAEncMiscParameterType)misc_priv_type;
-        misc_param_tmp->data[0] = misc_priv_value;
-        vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
-    
-        va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
-    }
+    render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
     
     return 0;
 }
 
-static int calc_poc(int pic_order_cnt_lsb)
+static int calc_poc(int pic_order_cnt_lsb, int frame_type)
 {
     static int PicOrderCntMsb_ref = 0, pic_order_cnt_lsb_ref = 0;
     int prevPicOrderCntMsb, prevPicOrderCntLsb;
     int PicOrderCntMsb, TopFieldOrderCnt;
     
-    if (current_frame_type == FRAME_IDR)
+    if (frame_type == FRAME_IDR)
         prevPicOrderCntMsb = prevPicOrderCntLsb = 0;
     else {
         prevPicOrderCntMsb = PicOrderCntMsb_ref;
@@ -1400,7 +1393,7 @@ static int calc_poc(int pic_order_cnt_lsb)
     
     TopFieldOrderCnt = PicOrderCntMsb + pic_order_cnt_lsb;
 
-    if (current_frame_type != FRAME_B) {
+    if (frame_type != FRAME_B) {
         PicOrderCntMsb_ref = PicOrderCntMsb;
         pic_order_cnt_lsb_ref = pic_order_cnt_lsb;
     }
@@ -1408,56 +1401,44 @@ static int calc_poc(int pic_order_cnt_lsb)
     return TopFieldOrderCnt;
 }
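
The wrap handling elided between the two hunks above follows the standard POC derivation (H.264 spec, clause 8.2.1); the exact code is not shown here, but the idea is sketched below with made-up values (a MaxPicOrderCntLsb of 256, not the constant this file uses):

#include <cstdio>
#include <initializer_list>

int main()
{
    const int MaxPicOrderCntLsb = 256;
    int prev_msb = 0, prev_lsb = 0;

    for (int lsb : { 0, 100, 200, 44 }) {  // 44 == 300 % 256, i.e. the lsb has wrapped
        int msb;
        if (lsb < prev_lsb && prev_lsb - lsb >= MaxPicOrderCntLsb / 2)
            msb = prev_msb + MaxPicOrderCntLsb;  // wrapped forwards
        else if (lsb > prev_lsb && lsb - prev_lsb > MaxPicOrderCntLsb / 2)
            msb = prev_msb - MaxPicOrderCntLsb;  // wrapped backwards
        else
            msb = prev_msb;
        printf("lsb %3d -> order cnt %d\n", lsb, msb + lsb);  // 0, 100, 200, 300
        prev_msb = msb;
        prev_lsb = lsb;
    }
}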
 
-static int render_picture(void)
+int H264EncoderImpl::render_picture(int frame_type, int display_frame_num, int gop_start_display_frame_num)
 {
     VABufferID pic_param_buf;
     VAStatus va_status;
     int i = 0;
 
-    pic_param.CurrPic.picture_id = gl_surfaces[current_frame_display % SURFACE_NUM].ref_surface;
+    pic_param.CurrPic.picture_id = gl_surfaces[display_frame_num % SURFACE_NUM].ref_surface;
     pic_param.CurrPic.frame_idx = current_frame_num;
     pic_param.CurrPic.flags = 0;
-    pic_param.CurrPic.TopFieldOrderCnt = calc_poc((current_frame_display - current_IDR_display) % MaxPicOrderCntLsb);
+    pic_param.CurrPic.TopFieldOrderCnt = calc_poc((display_frame_num - gop_start_display_frame_num) % MaxPicOrderCntLsb, frame_type);
     pic_param.CurrPic.BottomFieldOrderCnt = pic_param.CurrPic.TopFieldOrderCnt;
     CurrentCurrPic = pic_param.CurrPic;
 
-    if (getenv("TO_DEL")) { /* set RefPicList into ReferenceFrames */
-        update_RefPicList(); /* calc RefPicList */
-        memset(pic_param.ReferenceFrames, 0xff, 16 * sizeof(VAPictureH264)); /* invalid all */
-        if (current_frame_type == FRAME_P) {
-            pic_param.ReferenceFrames[0] = RefPicList0_P[0];
-        } else if (current_frame_type == FRAME_B) {
-            pic_param.ReferenceFrames[0] = RefPicList0_B[0];
-            pic_param.ReferenceFrames[1] = RefPicList1_B[0];
-        }
-    } else {
-        memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
-        for (i = numShortTerm; i < SURFACE_NUM; i++) {
-            pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
-            pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
-        }
+    memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
+    for (i = numShortTerm; i < MAX_NUM_REF1; i++) {
+        pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
+        pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
     }
     
-    pic_param.pic_fields.bits.idr_pic_flag = (current_frame_type == FRAME_IDR);
-    pic_param.pic_fields.bits.reference_pic_flag = (current_frame_type != FRAME_B);
+    pic_param.pic_fields.bits.idr_pic_flag = (frame_type == FRAME_IDR);
+    pic_param.pic_fields.bits.reference_pic_flag = (frame_type != FRAME_B);
     pic_param.pic_fields.bits.entropy_coding_mode_flag = h264_entropy_mode;
     pic_param.pic_fields.bits.deblocking_filter_control_present_flag = 1;
     pic_param.frame_num = current_frame_num;
-    pic_param.coded_buf = gl_surfaces[current_frame_display % SURFACE_NUM].coded_buf;
+    pic_param.coded_buf = gl_surfaces[display_frame_num % SURFACE_NUM].coded_buf;
     pic_param.last_picture = false;  // FIXME
     pic_param.pic_init_qp = initial_qp;
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
                                sizeof(pic_param), 1, &pic_param, &pic_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-    va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
 
     return 0;
 }
 
-static int render_packedsequence(void)
+int H264EncoderImpl::render_packedsequence()
 {
     VAEncPackedHeaderParameterBuffer packedheader_param_buffer;
     VABufferID packedseq_para_bufid, packedseq_data_bufid, render_id[2];
@@ -1487,8 +1468,7 @@ static int render_packedsequence(void)
 
     render_id[0] = packedseq_para_bufid;
     render_id[1] = packedseq_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedseq_buffer);
     
@@ -1496,7 +1476,7 @@ static int render_packedsequence(void)
 }
 
 
-static int render_packedpicture(void)
+int H264EncoderImpl::render_packedpicture()
 {
     VAEncPackedHeaderParameterBuffer packedheader_param_buffer;
     VABufferID packedpic_para_bufid, packedpic_data_bufid, render_id[2];
@@ -1525,15 +1505,14 @@ static int render_packedpicture(void)
 
     render_id[0] = packedpic_para_bufid;
     render_id[1] = packedpic_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedpic_buffer);
     
     return 0;
 }
 
-static void render_packedslice()
+void H264EncoderImpl::render_packedslice()
 {
     VAEncPackedHeaderParameterBuffer packedheader_param_buffer;
     VABufferID packedslice_para_bufid, packedslice_data_bufid, render_id[2];
@@ -1562,47 +1541,46 @@ static void render_packedslice()
 
     render_id[0] = packedslice_para_bufid;
     render_id[1] = packedslice_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedslice_buffer);
 }
 
-static int render_slice(void)
+int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type)
 {
     VABufferID slice_param_buf;
     VAStatus va_status;
     int i;
 
-    update_RefPicList();
+    update_RefPicList(frame_type);
     
     /* one frame, one slice */
     slice_param.macroblock_address = 0;
     slice_param.num_macroblocks = frame_width_mbaligned * frame_height_mbaligned/(16*16); /* Measured by MB */
-    slice_param.slice_type = (current_frame_type == FRAME_IDR)?2:current_frame_type;
-    if (current_frame_type == FRAME_IDR) {
-        if (current_frame_encoding != 0)
+    slice_param.slice_type = (frame_type == FRAME_IDR)?2:frame_type;
+    if (frame_type == FRAME_IDR) {
+        if (encoding_frame_num != 0)
             ++slice_param.idr_pic_id;
-    } else if (current_frame_type == FRAME_P) {
+    } else if (frame_type == FRAME_P) {
         int refpiclist0_max = h264_maxref & 0xffff;
         memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
 
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
-    } else if (current_frame_type == FRAME_B) {
+    } else if (frame_type == FRAME_B) {
         int refpiclist0_max = h264_maxref & 0xffff;
         int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
 
         memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264));
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
 
         memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264));
-        for (i = refpiclist1_max; i < 32; i++) {
+        for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1611,7 +1589,7 @@ static int render_slice(void)
     slice_param.slice_alpha_c0_offset_div2 = 0;
     slice_param.slice_beta_offset_div2 = 0;
     slice_param.direct_spatial_mv_pred_flag = 1;
-    slice_param.pic_order_cnt_lsb = (current_frame_display - current_IDR_display) % MaxPicOrderCntLsb;
+    slice_param.pic_order_cnt_lsb = (display_frame_num - gop_start_display_frame_num) % MaxPicOrderCntLsb;
     
 
     if (h264_packedheader &&
@@ -1620,138 +1598,181 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
                                sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
 
-    va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
-    
     return 0;
 }
 
 
 
-int H264Encoder::save_codeddata(storage_task task)
+void H264EncoderImpl::save_codeddata(storage_task task)
 {    
-    VACodedBufferSegment *buf_list = NULL;
-    VAStatus va_status;
-    unsigned int coded_size = 0;
+       VACodedBufferSegment *buf_list = NULL;
+       VAStatus va_status;
 
-    string data;
+       string data;
 
-    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate);  // So we never get negative dts.
+       va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+       CHECK_VASTATUS(va_status, "vaMapBuffer");
+       while (buf_list != NULL) {
+               data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+               buf_list = (VACodedBufferSegment *) buf_list->next;
+       }
+       vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
-    CHECK_VASTATUS(va_status, "vaMapBuffer");
-    while (buf_list != NULL) {
-        data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
-        buf_list = (VACodedBufferSegment *) buf_list->next;
+       {
+               // Add video.
+               AVPacket pkt;
+               memset(&pkt, 0, sizeof(pkt));
+               pkt.buf = nullptr;
+               pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+               pkt.size = data.size();
+               pkt.stream_index = 0;
+               if (task.frame_type == FRAME_IDR) {
+                       pkt.flags = AV_PKT_FLAG_KEY;
+               } else {
+                       pkt.flags = 0;
+               }
+               //pkt.duration = 1;
+               if (file_mux) {
+                       file_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
+               if (!global_flags.uncompressed_video_to_http) {
+                       httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
+       }
+       // Encode and add all audio frames up to and including the pts of this video frame.
+       for ( ;; ) {
+               int64_t audio_pts;
+               vector<float> audio;
+               {
+                       unique_lock<mutex> lock(frame_queue_mutex);
+                       frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+                       if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+                       auto it = pending_audio_frames.begin();
+                       if (it->first > task.pts) break;
+                       audio_pts = it->first;
+                       audio = move(it->second);
+                       pending_audio_frames.erase(it); 
+               }
 
-        frame_size += coded_size;
-    }
-    vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
-
-    {
-        // Add video.
-        AVPacket pkt;
-        memset(&pkt, 0, sizeof(pkt));
-        pkt.buf = nullptr;
-        pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
-        pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
-        pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
-        pkt.size = data.size();
-        pkt.stream_index = 0;
-        if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
-            pkt.flags = AV_PKT_FLAG_KEY;
-        } else {
-            pkt.flags = 0;
-        }
-        //pkt.duration = 1;
-        av_interleaved_write_frame(avctx, &pkt);
-    }
-    // Encode and add all audio frames up to and including the pts of this video frame.
-    // (They can never be queued to us after the video frame they belong to, only before.)
-    for ( ;; ) {
-        int64_t audio_pts;
-        std::vector<float> audio;
-        {
-             unique_lock<mutex> lock(frame_queue_mutex);
-             if (pending_audio_frames.empty()) break;
-             auto it = pending_audio_frames.begin();
-             if (it->first > task.pts) break;
-             audio_pts = it->first;
-             audio = move(it->second);
-             pending_audio_frames.erase(it); 
-        }
-        AVFrame *frame = avcodec_alloc_frame();
-        frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_FLT;
-        frame->channel_layout = AV_CH_LAYOUT_STEREO;
-
-        unique_ptr<float[]> planar_samples(new float[audio.size()]);
-        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-        for (int i = 0; i < frame->nb_samples; ++i) {
-            planar_samples[i] = audio[i * 2 + 0];
-            planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
-        }
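+               // The file and the HTTP stream can use different audio codecs;
+               // if the stream has a codec context of its own, encode twice,
+               // otherwise encode once and send the packets to both destinations.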
+               if (context_audio_stream) {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { file_mux.get() });
+                       encode_audio(audio, &audio_queue_stream, audio_pts, context_audio_stream, { httpd });
+               } else {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { httpd, file_mux.get() });
+               }
 
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = nullptr;
-        pkt.size = 0;
-        int got_output;
-        avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
-        if (got_output) {
-            pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-            pkt.dts = pkt.pts;
-            pkt.stream_index = 1;
-            av_interleaved_write_frame(avctx, &pkt);
-        }
-        // TODO: Delayed frames.
-        avcodec_free_frame(&frame);
-    }
+               if (audio_pts == task.pts) break;
+       }
+}
 
-#if 0
-    printf("\r      "); /* return back to startpoint */
-    switch (encode_order % 4) {
-        case 0:
-            printf("|");
-            break;
-        case 1:
-            printf("/");
-            break;
-        case 2:
-            printf("-");
-            break;
-        case 3:
-            printf("\\");
-            break;
-    }
-    printf("%08lld", encode_order);
-    printf("(%06d bytes coded)", coded_size);
-#endif
+void H264EncoderImpl::encode_audio(
+       const vector<float> &audio,
+       vector<float> *audio_queue,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       const vector<PacketDestination *> &destinations)
+{
+       if (ctx->frame_size == 0) {
+               // No queueing needed.
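+               // (A frame_size of 0 means the codec accepts any number of
+               // samples per frame, so the input can be passed straight through.)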
+               assert(audio_queue->empty());
+               assert(audio.size() % 2 == 0);
+               encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx, destinations);
+               return;
+       }
 
-    return 0;
+       audio_queue->insert(audio_queue->end(), audio.begin(), audio.end());
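+       // The queue holds interleaved stereo samples, so one codec frame
+       // is frame_size * 2 floats.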
+       size_t sample_num;
+       for (sample_num = 0;
+            sample_num + ctx->frame_size * 2 <= audio_queue->size();
+            sample_num += ctx->frame_size * 2) {
+               encode_audio_one_frame(&(*audio_queue)[sample_num],
+                                      ctx->frame_size,
+                                      audio_pts,
+                                      ctx,
+                                      destinations);
+       }
+       audio_queue->erase(audio_queue->begin(), audio_queue->begin() + sample_num);
 }
 
+void H264EncoderImpl::encode_audio_one_frame(
+       const float *audio,
+       size_t num_samples,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       const vector<PacketDestination *> &destinations)
+{
+       audio_frame->nb_samples = num_samples;
+       audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+
+       unique_ptr<float[]> planar_samples;
+       unique_ptr<int32_t[]> int_samples;
+
+       if (ctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
+               audio_frame->format = AV_SAMPLE_FMT_FLTP;
+               planar_samples.reset(new float[num_samples * 2]);
+               avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), num_samples * 2 * sizeof(float), 0);
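+               // Deinterleave from L/R/L/R/... into planar layout:
+               // all the left samples first, then all the right samples.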
+               for (size_t i = 0; i < num_samples; ++i) {
+                       planar_samples[i] = audio[i * 2 + 0];
+                       planar_samples[i + num_samples] = audio[i * 2 + 1];
+               }
+       } else {
+               assert(ctx->sample_fmt == AV_SAMPLE_FMT_S32);
+               int_samples.reset(new int32_t[num_samples * 2]);
+               int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), num_samples * 2 * sizeof(int32_t), 1);
+               if (ret < 0) {
+                       fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+                       exit(1);
+               }
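+               // Convert to signed 32-bit, clamping symmetrically so that
+               // -1.0f maps to -2147483647 instead of overflowing.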
+               for (size_t i = 0; i < num_samples * 2; ++i) {
+                       if (audio[i] >= 1.0f) {
+                               int_samples[i] = 2147483647;
+                       } else if (audio[i] <= -1.0f) {
+                               int_samples[i] = -2147483647;
+                       } else {
+                               int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+                       }
+               }
+       }
+
+       AVPacket pkt;
+       av_init_packet(&pkt);
+       pkt.data = nullptr;
+       pkt.size = 0;
+       int got_output = 0;
+       avcodec_encode_audio2(ctx, &pkt, audio_frame, &got_output);
+       if (got_output) {
+               pkt.stream_index = 1;
+               pkt.flags = AV_PKT_FLAG_KEY;
+               for (PacketDestination *dest : destinations) {
+                       dest->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+               }
+       }
+       // TODO: Delayed frames.
+       av_frame_unref(audio_frame);
+       av_free_packet(&pkt);
+}
 
 // Queue a frame for the storage thread; it will wait for the encode to finish
 // and then save the result.
-void H264Encoder::storage_task_enqueue(storage_task task)
+void H264EncoderImpl::storage_task_enqueue(storage_task task)
 {
-       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+       unique_lock<mutex> lock(storage_task_queue_mutex);
        storage_task_queue.push(move(task));
-       srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
        storage_task_queue_changed.notify_all();
 }
 
-void H264Encoder::storage_task_thread()
+void H264EncoderImpl::storage_task_thread()
 {
        for ( ;; ) {
                storage_task current;
                {
                        // wait until there's an encoded frame  
-                       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+                       unique_lock<mutex> lock(storage_task_queue_mutex);
                        storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || !storage_task_queue.empty(); });
-                       if (storage_thread_should_quit) return;
+                       if (storage_thread_should_quit && storage_task_queue.empty()) return;
                        current = move(storage_task_queue.front());
                        storage_task_queue.pop();
                }
@@ -1764,30 +1785,37 @@ void H264Encoder::storage_task_thread()
                save_codeddata(move(current));
 
                {
-                       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+                       unique_lock<mutex> lock(storage_task_queue_mutex);
                        srcsurface_status[current.display_order % SURFACE_NUM] = SRC_SURFACE_FREE;
                        storage_task_queue_changed.notify_all();
                }
        }
 }
 
-static int release_encode()
+int H264EncoderImpl::release_encode()
 {
-    int i;
-    
-    for (i = 0; i < SURFACE_NUM; i++) {
-        vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
-    }
-    
-    vaDestroyContext(va_dpy, context_id);
-    vaDestroyConfig(va_dpy, config_id);
+       for (unsigned i = 0; i < SURFACE_NUM; i++) {
+               vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
+               vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
+               vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
+
+               if (!use_zerocopy) {
+                       glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+                       glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+                       glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                       glDeleteBuffers(1, &gl_surfaces[i].pbo);
+               }
+               glDeleteTextures(1, &gl_surfaces[i].y_tex);
+               glDeleteTextures(1, &gl_surfaces[i].cbcr_tex);
+       }
 
-    return 0;
+       vaDestroyContext(va_dpy, context_id);
+       vaDestroyConfig(va_dpy, config_id);
+
+       return 0;
 }
 
-static int deinit_va()
+int H264EncoderImpl::deinit_va()
 { 
     vaTerminate(va_dpy);
 
@@ -1796,89 +1824,71 @@ static int deinit_va()
     return 0;
 }
 
+namespace {
 
-static int print_input()
-{
-    printf("\n\nINPUT:Try to encode H264...\n");
-    if (rc_mode != -1)
-        printf("INPUT: RateControl  : %s\n", rc_to_string(rc_mode));
-    printf("INPUT: Resolution   : %dx%dframes\n", frame_width, frame_height);
-    printf("INPUT: FrameRate    : %d\n", frame_rate);
-    printf("INPUT: Bitrate      : %d\n", frame_bitrate);
-    printf("INPUT: Slieces      : %d\n", frame_slices);
-    printf("INPUT: IntraPeriod  : %d\n", intra_period);
-    printf("INPUT: IDRPeriod    : %d\n", intra_idr_period);
-    printf("INPUT: IpPeriod     : %d\n", ip_period);
-    printf("INPUT: Initial QP   : %d\n", initial_qp);
-    printf("INPUT: Min QP       : %d\n", minimal_qp);
-    printf("INPUT: Coded Clip   : %s\n", coded_fn);
-    
-    printf("\n\n"); /* return back to startpoint */
-    
-    return 0;
-}
-
-
-//H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename) 
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename)
-       : current_storage_frame(0), surface(surface)
-       //: width(width), height(height), current_encoding_frame(0)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx)
 {
-       av_register_all();
-       avctx = avformat_alloc_context();
-       avctx->oformat = av_guess_format(NULL, output_filename, NULL);
-       strcpy(avctx->filename, output_filename);
-       if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
-               fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
+       AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+       if (codec_audio == nullptr) {
+               fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
                exit(1);
        }
-       AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
-       avstream_video = avformat_new_stream(avctx, codec_video);
-       if (avstream_video == nullptr) {
-               fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
-               exit(1);
+
+       AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+       context_audio->bit_rate = bit_rate;
+       context_audio->sample_rate = OUTPUT_FREQUENCY;
+
+       // Choose sample format; we currently only support these two
+       // (see encode_audio), so we're a bit picky.
+       const AVSampleFormat *ptr = codec_audio->sample_fmts;
+       for ( ; *ptr != AV_SAMPLE_FMT_NONE; ++ptr) {
+               if (*ptr == AV_SAMPLE_FMT_FLTP || *ptr == AV_SAMPLE_FMT_S32) {
+                       context_audio->sample_fmt = *ptr;
+                       break;
+               }
        }
-       avstream_video->time_base = AVRational{1, TIMEBASE};
-       avstream_video->codec->width = width;
-       avstream_video->codec->height = height;
-       avstream_video->codec->time_base = AVRational{1, TIMEBASE};
-       avstream_video->codec->ticks_per_frame = 1;  // or 2?
-
-       AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
-       avstream_audio = avformat_new_stream(avctx, codec_audio);
-       if (avstream_audio == nullptr) {
-               fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
+       if (*ptr == AV_SAMPLE_FMT_NONE) {
+               fprintf(stderr, "ERROR: Audio codec does not support fltp or s32 sample formats\n");
                exit(1);
        }
-       avstream_audio->time_base = AVRational{1, TIMEBASE};
-       avstream_audio->codec->bit_rate = 256000;
-       avstream_audio->codec->sample_rate = 48000;
-       avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
-       avstream_audio->codec->channels = 2;
-       avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
-       avstream_audio->codec->time_base = AVRational{1, TIMEBASE};
-
-       /* open it */
-       if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
-               fprintf(stderr, "Could not open codec\n");
+
+       context_audio->channels = 2;
+       context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
+       context_audio->time_base = AVRational{1, TIMEBASE};
+       if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
+               fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
                exit(1);
        }
 
-       if (avformat_write_header(avctx, NULL) < 0) {
-               fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
-               exit(1);
+       *ctx = context_audio;
+}
+
+}  // namespace
+
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+       : current_storage_frame(0), surface(surface), httpd(httpd)
+{
+       init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, &context_audio_file);
+
+       if (!global_flags.stream_audio_codec_name.empty()) {
+               init_audio_encoder(global_flags.stream_audio_codec_name,
+                       global_flags.stream_audio_codec_bitrate, &context_audio_stream);
        }
 
+       audio_frame = av_frame_alloc();
+
        frame_width = width;
        frame_height = height;
        frame_width_mbaligned = (frame_width + 15) & (~15);
        frame_height_mbaligned = (frame_height + 15) & (~15);
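+       // H.264 works on 16x16 macroblocks, so round the encoded size up to
+       // a multiple of 16 (e.g., 1080 becomes 1088).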
-        frame_bitrate = 15000000;  // / 60;
-       current_frame_encoding = 0;
 
-       print_input();
+       //print_input();
+
+       if (global_flags.uncompressed_video_to_http) {
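+               // The uncompressed frames come out of the pipeline in encoding
+               // order; buffer ip_period - 1 of them so that they can be sent
+               // out in pts order.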
+               reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+       }
 
-       init_va();
+       init_va(va_display);
        setup_encode();
 
        // No frames are ready yet.
@@ -1888,49 +1898,41 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *o
        memset(&pic_param, 0, sizeof(pic_param));
        memset(&slice_param, 0, sizeof(slice_param));
 
-       storage_thread = std::thread(&H264Encoder::storage_task_thread, this);
+       storage_thread = thread(&H264EncoderImpl::storage_task_thread, this);
 
-       copy_thread = std::thread([this]{
+       encode_thread = thread([this]{
                //SDL_GL_MakeCurrent(window, context);
-               QOpenGLContext *context = create_context();
+               QOpenGLContext *context = create_context(this->surface);
                eglBindAPI(EGL_OPENGL_API);
                if (!make_current(context, this->surface)) {
                        printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
                                eglGetError());
                        exit(1);
                }
-               copy_thread_func();
+               encode_thread_func();
        });
 }
 
-H264Encoder::~H264Encoder()
+H264EncoderImpl::~H264EncoderImpl()
 {
-       {
-               unique_lock<mutex> lock(storage_task_queue_mutex);
-               storage_thread_should_quit = true;
-               storage_task_queue_changed.notify_all();
-       }
-       {
-               unique_lock<mutex> lock(frame_queue_mutex);
-               copy_thread_should_quit = true;
-               frame_queue_nonempty.notify_one();
-       }
-       storage_thread.join();
-       copy_thread.join();
-
-       release_encode();
-       deinit_va();
+       shutdown();
+       av_frame_free(&audio_frame);
 
-       av_write_trailer(avctx);
-       avformat_free_context(avctx);
+       // TODO: Destroy context.
 }
 
-bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 {
+       assert(!is_shutdown);
        {
                // Wait until this frame slot is done encoding.
-               std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+               unique_lock<mutex> lock(storage_task_queue_mutex);
+               if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+                       fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+                               current_storage_frame % SURFACE_NUM, current_storage_frame);
+               }
                storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+               srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
                if (storage_thread_should_quit) return false;
        }
 
@@ -1939,156 +1941,378 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        *y_tex = surf->y_tex;
        *cbcr_tex = surf->cbcr_tex;
 
-       VASurfaceID surface = surf->src_surface;
-        VAStatus va_status = vaDeriveImage(va_dpy, surface, &surf->surface_image);
-        CHECK_VASTATUS(va_status, "vaDeriveImage");
-
-       VABufferInfo buf_info;
-       buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM?
-       va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info);
-        CHECK_VASTATUS(va_status, "vaAcquireBufferHandle");
-
-       // Create Y image.
-       surf->y_egl_image = EGL_NO_IMAGE_KHR;
-       EGLint y_attribs[] = {
-               EGL_WIDTH, frame_width,
-               EGL_HEIGHT, frame_height,
-               EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '),
-               EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
-               EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]),
-               EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]),
-               EGL_NONE
-       };
-
-       surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs);
-       assert(surf->y_egl_image != EGL_NO_IMAGE_KHR);
-
-       // Associate Y image to a texture.
-       glBindTexture(GL_TEXTURE_2D, *y_tex);
-       glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image);
-
-       // Create CbCr image.
-       surf->cbcr_egl_image = EGL_NO_IMAGE_KHR;
-       EGLint cbcr_attribs[] = {
-               EGL_WIDTH, frame_width,
-               EGL_HEIGHT, frame_height,
-               EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'),
-               EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
-               EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]),
-               EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]),
-               EGL_NONE
-       };
-
-       surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs);
-       assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR);
-
-       // Associate CbCr image to a texture.
-       glBindTexture(GL_TEXTURE_2D, *cbcr_tex);
-       glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
+       VAStatus va_status = vaDeriveImage(va_dpy, surf->src_surface, &surf->surface_image);
+       CHECK_VASTATUS(va_status, "vaDeriveImage");
+
+       if (use_zerocopy) {
+               VABufferInfo buf_info;
+               buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM?
+               va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info);
+               CHECK_VASTATUS(va_status, "vaAcquireBufferHandle");
+
+               // Create Y image.
+               surf->y_egl_image = EGL_NO_IMAGE_KHR;
+               EGLint y_attribs[] = {
+                       EGL_WIDTH, frame_width,
+                       EGL_HEIGHT, frame_height,
+                       EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '),
+                       EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
+                       EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]),
+                       EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]),
+                       EGL_NONE
+               };
+
+               surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs);
+               assert(surf->y_egl_image != EGL_NO_IMAGE_KHR);
+
+               // Associate Y image to a texture.
+               glBindTexture(GL_TEXTURE_2D, *y_tex);
+               glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image);
+
+               // Create CbCr image.
+               surf->cbcr_egl_image = EGL_NO_IMAGE_KHR;
+               EGLint cbcr_attribs[] = {
+                       EGL_WIDTH, frame_width,
+                       EGL_HEIGHT, frame_height,
+                       EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'),
+                       EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
+                       EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]),
+                       EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]),
+                       EGL_NONE
+               };
+
+               surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs);
+               assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR);
+
+               // Associate CbCr image to a texture.
+               glBindTexture(GL_TEXTURE_2D, *cbcr_tex);
+               glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
+       }
 
        return true;
 }
 
-void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 {
+       assert(!is_shutdown);
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_audio_frames[pts] = move(audio);
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
-
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264EncoderImpl::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
+       assert(!is_shutdown);
+
+       if (!use_zerocopy) {
+               GLSurface *surf = &gl_surfaces[current_storage_frame % SURFACE_NUM];
+
+               glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+               check_error();
+
+               glBindBuffer(GL_PIXEL_PACK_BUFFER, surf->pbo);
+               check_error();
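+               // With a pixel pack buffer bound, glGetTexImage() writes into
+               // the PBO; BUFFER_OFFSET() turns our byte offset into the
+               // pointer-typed argument it expects.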
+
+               glBindTexture(GL_TEXTURE_2D, surf->y_tex);
+               check_error();
+               glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->y_offset));
+               check_error();
+
+               glBindTexture(GL_TEXTURE_2D, surf->cbcr_tex);
+               check_error();
+               glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->cbcr_offset));
+               check_error();
+
+               glBindTexture(GL_TEXTURE_2D, 0);
+               check_error();
+               glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+               check_error();
+
+               glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+               check_error();
+       }
+
+       RefCountedGLsync fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+       check_error();
+       glFlush();  // Make the H.264 thread see the fence as soon as possible.
+       check_error();
+
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
                ++current_storage_frame;
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
+       return fence;
 }
 
-void H264Encoder::copy_thread_func()
+void H264EncoderImpl::shutdown()
+{
+       if (is_shutdown) {
+               return;
+       }
+
+       {
+               unique_lock<mutex> lock(frame_queue_mutex);
+               encode_thread_should_quit = true;
+               frame_queue_nonempty.notify_all();
+       }
+       encode_thread.join();
+       {
+               unique_lock<mutex> lock(storage_task_queue_mutex);
+               storage_thread_should_quit = true;
+               frame_queue_nonempty.notify_all();
+               storage_task_queue_changed.notify_all();
+       }
+       storage_thread.join();
+
+       release_encode();
+       deinit_va();
+       is_shutdown = true;
+}
+
+void H264EncoderImpl::open_output_file(const std::string &filename)
+{
+       AVFormatContext *avctx = avformat_alloc_context();
+       avctx->oformat = av_guess_format(NULL, filename.c_str(), NULL);
+       assert(filename.size() < sizeof(avctx->filename) - 1);
+       strcpy(avctx->filename, filename.c_str());
+
+       string url = "file:" + filename;
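+       // (The "file:" prefix makes the protocol explicit, so that a colon
+       // in the filename is not mistaken for one.)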
+       int ret = avio_open2(&avctx->pb, url.c_str(), AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL);
+       if (ret < 0) {
+               char tmp[AV_ERROR_MAX_STRING_SIZE];
+               fprintf(stderr, "%s: avio_open2() failed: %s\n", filename.c_str(), av_make_error_string(tmp, sizeof(tmp), ret));
+               exit(1);
+       }
+
+       file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
+}
+
+void H264EncoderImpl::close_output_file()
+{
+       file_mux.reset();
+}
+
+void H264EncoderImpl::encode_thread_func()
 {
        int64_t last_dts = -1;
-       for ( ;; ) {
+       int gop_start_display_frame_num = 0;
+       for (int encoding_frame_num = 0; ; ++encoding_frame_num) {
                PendingFrame frame;
                int pts_lag;
-               encoding2display_order(current_frame_encoding, intra_period, intra_idr_period, ip_period,
-                                      &current_frame_display, &current_frame_type, &pts_lag);
-               if (current_frame_type == FRAME_IDR) {
+               int frame_type, display_frame_num;
+               encoding2display_order(encoding_frame_num, intra_period, intra_idr_period, ip_period,
+                                      &display_frame_num, &frame_type, &pts_lag);
+               if (frame_type == FRAME_IDR) {
                        numShortTerm = 0;
                        current_frame_num = 0;
-                       current_IDR_display = current_frame_display;
+                       gop_start_display_frame_num = display_frame_num;
                }
 
                {
                        unique_lock<mutex> lock(frame_queue_mutex);
-                       frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_video_frames.count(current_frame_display) != 0; });
-                       if (copy_thread_should_quit) return;
-                       frame = move(pending_video_frames[current_frame_display]);
-                       pending_video_frames.erase(current_frame_display);
+                       frame_queue_nonempty.wait(lock, [this, display_frame_num]{
+                               return encode_thread_should_quit || pending_video_frames.count(display_frame_num) != 0;
+                       });
+                       if (encode_thread_should_quit && pending_video_frames.count(display_frame_num) == 0) {
+                               // We have queued frames that were supposed to be B-frames,
+                               // but there will be no P-frame to encode them against. Encode them all
+                               // as P-frames instead. Note that this happens under the mutex,
+                               // but nobody else uses it at this point, since we're shutting down,
+                               // so there's no contention.
+                               encode_remaining_frames_as_p(encoding_frame_num, gop_start_display_frame_num, last_dts);
+                               return;
+                       } else {
+                               frame = move(pending_video_frames[display_frame_num]);
+                               pending_video_frames.erase(display_frame_num);
+                       }
+               }
+
+               // Determine the dts of this frame.
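+               // A pts_lag of -1 means encoding2display_order() could not give
+               // us a lag for this frame; just move dts one frame forward at
+               // the maximum frame rate.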
+               int64_t dts;
+               if (pts_lag == -1) {
+                       assert(last_dts != -1);
+                       dts = last_dts + (TIMEBASE / MAX_FPS);
+               } else {
+                       dts = frame.pts - pts_lag;
+               }
+               last_dts = dts;
+
+               encode_frame(frame, encoding_frame_num, display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts);
+       }
+}
+
+void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts)
+{
+       if (pending_video_frames.empty()) {
+               return;
+       }
+
+       for (auto &pending_frame : pending_video_frames) {
+               int display_frame_num = pending_frame.first;
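+               // Frame 0 is always an IDR frame and is encoded as soon as it
+               // arrives, so it can never be left over here.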
+               assert(display_frame_num > 0);
+               PendingFrame frame = move(pending_frame.second);
+               int64_t dts = last_dts + (TIMEBASE / MAX_FPS);
+               printf("Finalizing encode: Encoding leftover frame %d as P-frame instead of B-frame.\n", display_frame_num);
+               encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
+               last_dts = dts;
+       }
+
+       if (global_flags.uncompressed_video_to_http) {
+               // Add frames left in reorderer.
+               while (!reorderer->empty()) {
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+                       add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
                }
+       }
+}
 
-               // Wait for the GPU to be done with the frame.
-               glClientWaitSync(frame.fence.get(), 0, 0);
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
+       AVPacket pkt;
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.buf = nullptr;
+       pkt.data = const_cast<uint8_t *>(data);
+       pkt.size = frame_width * frame_height * 2;
+       pkt.stream_index = 0;
+       pkt.flags = AV_PKT_FLAG_KEY;
+       httpd->add_packet(pkt, pts, pts);
+}
 
-               // Release back any input frames we needed to render this frame.
-               frame.input_frames.clear();
+namespace {
 
-               // Unmap the image.
-               GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
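+// Copy a tightly packed plane into a destination that may have row padding
+// (dst_pitch >= src_width); fall back to a single memcpy when the two match.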
+void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height)
+{
+       if (src_width == dst_pitch) {
+               memcpy(dst, src, src_width * height);
+       } else {
+               for (size_t y = 0; y < height; ++y) {
+                       const uint8_t *sptr = src + y * src_width;
+                       uint8_t *dptr = dst + y * dst_pitch;
+                       memcpy(dptr, sptr, src_width);
+               }
+       }
+}
+
+}  // namespace
+
+void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
+                                   int frame_type, int64_t pts, int64_t dts)
+{
+       // Wait for the GPU to be done with the frame.
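+       // (We wait in one-second slices so that check_error() gets to run
+       // between each wait.)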
+       GLenum sync_status;
+       do {
+               sync_status = glClientWaitSync(frame.fence.get(), 0, 1000000000);
+               check_error();
+       } while (sync_status == GL_TIMEOUT_EXPIRED);
+       assert(sync_status != GL_WAIT_FAILED);
+
+       // Release back any input frames we needed to render this frame.
+       frame.input_frames.clear();
+
+       GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
+       VAStatus va_status;
+
+       if (use_zerocopy) {
                eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
                eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
-               VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
+               va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
                CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
-               va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
-               CHECK_VASTATUS(va_status, "vaDestroyImage");
+       } else {
+               unsigned char *surface_p = nullptr;
+               va_status = vaMapBuffer(va_dpy, surf->surface_image.buf, (void **)&surface_p);
+               CHECK_VASTATUS(va_status, "vaMapBuffer");
+
+               unsigned char *va_y_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[0];
+               memcpy_with_pitch(va_y_ptr, surf->y_ptr, frame_width, surf->surface_image.pitches[0], frame_height);
 
-               VASurfaceID surface = surf->src_surface;
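+               // The CbCr plane has Cb and Cr interleaved at half horizontal
+               // resolution, so each row is (width / 2) 16-bit pairs, i.e.
+               // frame_width bytes.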
+               unsigned char *va_cbcr_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[1];
+               memcpy_with_pitch(va_cbcr_ptr, surf->cbcr_ptr, (frame_width / 2) * sizeof(uint16_t), surf->surface_image.pitches[1], frame_height / 2);
 
-               // Schedule the frame for encoding.
-               va_status = vaBeginPicture(va_dpy, context_id, surface);
-               CHECK_VASTATUS(va_status, "vaBeginPicture");
+               va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
+               CHECK_VASTATUS(va_status, "vaUnmapBuffer");
 
-               if (current_frame_type == FRAME_IDR) {
-                       render_sequence();
-                       render_picture();            
-                       if (h264_packedheader) {
-                               render_packedsequence();
-                               render_packedpicture();
+               if (global_flags.uncompressed_video_to_http) {
+                       // Add uncompressed video. (Note that pts == dts here.)
+                       // Delay needs to match audio.
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<uint8_t *>(surf->y_ptr));
+                       if (output_frame.second != nullptr) {
+                               add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
                        }
-               } else {
-                       //render_sequence();
-                       render_picture();
                }
-               render_slice();
-               
-               va_status = vaEndPicture(va_dpy, context_id);
-               CHECK_VASTATUS(va_status, "vaEndPicture");
+       }
 
-               // Determine the pts and dts of this frame.
-               int64_t pts = frame.pts;
-               int64_t dts;
-               if (pts_lag == -1) {
-                       assert(last_dts != -1);
-                       dts = last_dts + (TIMEBASE / frame_rate);
-               } else {
-                       dts = pts - pts_lag;
-               }
-               last_dts = dts;
+       va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
+       CHECK_VASTATUS(va_status, "vaDestroyImage");
 
-               // so now the data is done encoding (well, async job kicked off)...
-               // we send that to the storage thread
-               storage_task tmp;
-               tmp.display_order = current_frame_display;
-               tmp.frame_type = current_frame_type;
-               tmp.pts = pts;
-               tmp.dts = dts;
-               storage_task_enqueue(move(tmp));
-               
-               update_ReferenceFrames();
-               ++current_frame_encoding;
+       // Schedule the frame for encoding.
+       VASurfaceID va_surface = surf->src_surface;
+       va_status = vaBeginPicture(va_dpy, context_id, va_surface);
+       CHECK_VASTATUS(va_status, "vaBeginPicture");
+
+       if (frame_type == FRAME_IDR) {
+               render_sequence();
+               render_picture(frame_type, display_frame_num, gop_start_display_frame_num);
+               if (h264_packedheader) {
+                       render_packedsequence();
+                       render_packedpicture();
+               }
+       } else {
+               //render_sequence();
+               render_picture(frame_type, display_frame_num, gop_start_display_frame_num);
        }
+       render_slice(encoding_frame_num, display_frame_num, gop_start_display_frame_num, frame_type);
+
+       va_status = vaEndPicture(va_dpy, context_id);
+       CHECK_VASTATUS(va_status, "vaEndPicture");
+
+       // The encode has now been kicked off (asynchronously); hand the frame
+       // over to the storage thread, which waits for it to finish and saves it.
+       storage_task tmp;
+       tmp.display_order = display_frame_num;
+       tmp.frame_type = frame_type;
+       tmp.pts = pts;
+       tmp.dts = dts;
+       storage_task_enqueue(move(tmp));
+
+       update_ReferenceFrames(frame_type);
+}
+
+// Proxy object.
+H264Encoder::H264Encoder(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+       : impl(new H264EncoderImpl(surface, va_display, width, height, httpd)) {}
+
+// Must be defined here because unique_ptr<> destructor needs to know the impl.
+H264Encoder::~H264Encoder() {}
+
+void H264Encoder::add_audio(int64_t pts, vector<float> audio)
+{
+       impl->add_audio(pts, audio);
+}
+
+bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+{
+       return impl->begin_frame(y_tex, cbcr_tex);
+}
+
+RefCountedGLsync H264Encoder::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
+{
+       return impl->end_frame(pts, input_frames);
+}
+
+void H264Encoder::shutdown()
+{
+       impl->shutdown();
+}
+
+void H264Encoder::open_output_file(const std::string &filename)
+{
+       impl->open_output_file(filename);
+}
+
+void H264Encoder::close_output_file()
+{
+       impl->close_output_file();
 }