Pull the Mux class out of HTTPD. (First step towards decoupling file and HTTP muxing.)
diff --git a/h264encode.cpp b/h264encode.cpp
index e2912d9a11c262c0eff3a9b1fe7a7355e9cb3f59..7bb0f8469a4b8c4daaa5a6551255afc55657e7c5 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -38,6 +38,7 @@ extern "C" {
 
 #include "context.h"
 #include "defs.h"
+#include "flags.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -84,6 +85,8 @@ class QSurface;
    
 #define BITSTREAM_ALLOCATE_STEPPING     4096
 #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
+#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
+#define MAX_NUM_REF2 32 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
 
 static constexpr unsigned int MaxFrameNum = (2<<16);
 static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
@@ -111,13 +114,89 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
+//
+// The class also deals with keeping a freelist as needed.
+class FrameReorderer {
+public:
+       FrameReorderer(unsigned queue_length, int width, int height);
+
+       // Inserts the given frame, and returns the next frame to output with its pts,
+       // if any; otherwise, returns -1 and nullptr. Does _not_ take ownership of data;
+       // a copy is made if needed. The returned pointer is valid until the next call
+       // to reorder_frame() or destruction. If queue_length == 0, returns pts and data unchanged.
+       pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+       // The same as reorder_frame, but without inserting anything. Used to empty the queue.
+       pair<int64_t, const uint8_t *> get_first_frame();
+
+       bool empty() const { return frames.empty(); }
+
+private:
+       unsigned queue_length;
+       int width, height;
+
+       priority_queue<pair<int64_t, uint8_t *>> frames;
+       stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
+
+       // Owns all the pointers. Normally, freelist and frames could do this themselves,
+       // except priority_queue doesn't work well with movable-only types.
+       vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+    : queue_length(queue_length), width(width), height(height)
+{
+       for (unsigned i = 0; i < queue_length; ++i) {
+               owner.emplace_back(new uint8_t[width * height * 2]);
+               freelist.push(owner.back().get());
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+       if (queue_length == 0) {
+               return make_pair(pts, data);
+       }
+
+       assert(!freelist.empty());
+       uint8_t *storage = freelist.top();
+       freelist.pop();
+       memcpy(storage, data, width * height * 2);
+       frames.emplace(-pts, storage);  // Invert pts to get smallest first.
+
+       if (frames.size() >= queue_length) {
+               return get_first_frame();
+       } else {
+               return make_pair(-1, nullptr);
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+       assert(!frames.empty());
+       pair<int64_t, uint8_t *> storage = frames.top();
+       frames.pop();
+       int64_t pts = storage.first;
+       freelist.push(storage.second);
+       return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
+}
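
As an aside, the negated-pts trick works because std::priority_queue is a max-heap, so storing -pts makes top() yield the frame with the smallest pts. A minimal standalone sketch (not part of the patch) of the behavior reorder_frame() relies on:

    #include <cstdint>
    #include <cstdio>
    #include <queue>
    #include <utility>

    int main() {
        // (-pts, frame id), the same shape FrameReorderer stores.
        std::priority_queue<std::pair<int64_t, int>> frames;
        const int64_t pts_in_encoding_order[] = {0, 30, 10, 20};
        for (int64_t pts : pts_in_encoding_order) {
            frames.emplace(-pts, int(pts / 10));  // id is just for illustration
        }
        // Pops in presentation order: 0, 10, 20, 30.
        while (!frames.empty()) {
            printf("pts %lld\n", (long long)-frames.top().first);
            frames.pop();
        }
    }
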
+
 class H264EncoderImpl {
 public:
        H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
        ~H264EncoderImpl();
        void add_audio(int64_t pts, vector<float> audio);
        bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       void end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames);
+       RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
        void shutdown();
 
 private:
@@ -133,11 +212,21 @@ private:
                int64_t pts;
        };
 
+       // So we never get negative dts.
+       int64_t global_delay() const {
+               return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+       }
+
        void encode_thread_func();
        void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+       void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
        void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
                          int frame_type, int64_t pts, int64_t dts);
        void storage_task_thread();
+       void encode_audio(const vector<float> &audio,
+                         int64_t audio_pts,
+                         AVCodecContext *ctx,
+                         HTTPD::PacketDestination destination);
        void storage_task_enqueue(storage_task task);
        void save_codeddata(storage_task task);
        int render_packedsequence();
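
The global_delay() helper introduced above shifts every pts and dts so that dts never goes negative under B-frame reordering. A worked example (the TIMEBASE and MAX_FPS values here are assumptions; the real constants live in timebase.h and defs.h):

    // With ip_period = 3 (pattern I B B P B B P ...), each P frame is
    // emitted before the two B-frames that precede it in display order,
    // so dts lags pts by up to (ip_period - 1) frame durations.
    const int64_t TIMEBASE = 90000;                                  // assumed
    const int64_t MAX_FPS = 60;                                      // assumed
    const int64_t frame_duration = TIMEBASE / MAX_FPS;               // 1500
    const int64_t delay = (3 /* ip_period */ - 1) * frame_duration;  // 3000
    // Without the shift, the first frame would get dts = -3000; adding
    // `delay` to every pts and dts keeps dts >= 0 and leaves all
    // pts/dts differences intact.
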
@@ -154,6 +243,7 @@ private:
        int build_packed_slice_buffer(unsigned char **header_buffer);
        int init_va(const string &va_display);
        int deinit_va();
+       void enable_zerocopy_if_possible();
        VADisplay va_open_display(const string &va_display);
        void va_close_display(VADisplay va_dpy);
        int setup_encode();
@@ -184,7 +274,9 @@ private:
        QSurface *surface;
 
        AVCodecContext *context_audio;
+       AVFrame *audio_frame = nullptr;
        HTTPD *httpd;
+       unique_ptr<FrameReorderer> reorderer;
 
        Display *x11_display = nullptr;
 
@@ -205,7 +297,6 @@ private:
                EGLImage y_egl_image, cbcr_egl_image;
 
                // Only if use_zerocopy == false.
-               RefCountedGLsync readback_done_fence;
                GLuint pbo;
                uint8_t *y_ptr, *cbcr_ptr;
                size_t y_offset, cbcr_offset;
@@ -218,7 +309,7 @@ private:
        VAEncPictureParameterBufferH264 pic_param;
        VAEncSliceParameterBufferH264 slice_param;
        VAPictureH264 CurrentCurrPic;
-       VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
+       VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
 
        // Static quality settings.
        static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
@@ -845,6 +936,16 @@ static const char *rc_to_string(int rc_mode)
     }
 }
 
+void H264EncoderImpl::enable_zerocopy_if_possible()
+{
+       if (global_flags.uncompressed_video_to_http) {
+               fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+               use_zerocopy = false;
+       } else {
+               use_zerocopy = true;
+       }
+}
+
 VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 {
        if (va_display.empty()) {
@@ -853,7 +954,7 @@ VADisplay H264EncoderImpl::va_open_display(const string &va_display)
                        fprintf(stderr, "error: can't connect to X server!\n");
                        return NULL;
                }
-               use_zerocopy = true;
+               enable_zerocopy_if_possible();
                return vaGetDisplay(x11_display);
        } else if (va_display[0] != '/') {
                x11_display = XOpenDisplay(va_display.c_str());
@@ -861,7 +962,7 @@ VADisplay H264EncoderImpl::va_open_display(const string &va_display)
                        fprintf(stderr, "error: can't connect to X server!\n");
                        return NULL;
                }
-               use_zerocopy = true;
+               enable_zerocopy_if_possible();
                return vaGetDisplay(x11_display);
        } else {
                drm_fd = open(va_display.c_str(), O_RDWR);
@@ -925,7 +1026,7 @@ int H264EncoderImpl::init_va(const string &va_display)
     
     if (support_encode == 0) {
         printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
-        printf("but have one in your system, try launching Nageru with --va-display /dev/dri/card0\n");
+        printf("but have one in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
         printf("to use VA-API against DRM instead of X11.\n");
         exit(1);
     } else {
@@ -1112,10 +1213,10 @@ int H264EncoderImpl::setup_encode()
             glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
             glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
             uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
-            gl_surfaces[i].y_ptr = ptr;
-            gl_surfaces[i].cbcr_ptr = ptr + frame_width * frame_height;
             gl_surfaces[i].y_offset = 0;
             gl_surfaces[i].cbcr_offset = frame_width * frame_height;
+            gl_surfaces[i].y_ptr = ptr + gl_surfaces[i].y_offset;
+            gl_surfaces[i].cbcr_ptr = ptr + gl_surfaces[i].cbcr_offset;
             glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
         }
     }
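
The PBO set up here uses persistent mapping, so the CPU-side pointers stay valid while the GPU writes into the buffer. A condensed sketch of the pattern (standalone; assumes an OpenGL 4.4 context or ARB_buffer_storage):

    GLuint pbo;
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
    // Immutable storage that may remain mapped while the GPU writes into it.
    glBufferStorage(GL_PIXEL_PACK_BUFFER, size, nullptr,
                    GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
    uint8_t *ptr = (uint8_t *)glMapBufferRange(
        GL_PIXEL_PACK_BUFFER, 0, size,
        GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
    // The mapping is not GL_MAP_COHERENT_BIT, so before reading *ptr the CPU
    // needs a glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT) plus a
    // fence wait; end_frame() issues the barrier and fence below, and
    // encode_frame() waits on it.
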
@@ -1299,7 +1400,7 @@ int H264EncoderImpl::render_picture(int frame_type, int display_frame_num, int g
     CurrentCurrPic = pic_param.CurrPic;
 
     memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
-    for (i = numShortTerm; i < SURFACE_NUM; i++) {
+    for (i = numShortTerm; i < MAX_NUM_REF1; i++) {
         pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
         pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
     }
@@ -1449,7 +1550,7 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
         int refpiclist0_max = h264_maxref & 0xffff;
         memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
 
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1458,13 +1559,13 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
         int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
 
         memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264));
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
 
         memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264));
-        for (i = refpiclist1_max; i < 32; i++) {
+        for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1493,116 +1594,117 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
 
 void H264EncoderImpl::save_codeddata(storage_task task)
 {    
-    VACodedBufferSegment *buf_list = NULL;
-    VAStatus va_status;
+       VACodedBufferSegment *buf_list = NULL;
+       VAStatus va_status;
 
-    string data;
+       string data;
 
-    const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
+       va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+       CHECK_VASTATUS(va_status, "vaMapBuffer");
+       while (buf_list != NULL) {
+               data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+               buf_list = (VACodedBufferSegment *) buf_list->next;
+       }
+       vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
-    CHECK_VASTATUS(va_status, "vaMapBuffer");
-    while (buf_list != NULL) {
-        data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
-        buf_list = (VACodedBufferSegment *) buf_list->next;
-    }
-    vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
-
-    {
-        // Add video.
-        AVPacket pkt;
-        memset(&pkt, 0, sizeof(pkt));
-        pkt.buf = nullptr;
-        pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
-        pkt.size = data.size();
-        pkt.stream_index = 0;
-        if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
-            pkt.flags = AV_PKT_FLAG_KEY;
-        } else {
-            pkt.flags = 0;
-        }
-        //pkt.duration = 1;
-        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
-    }
-    // Encode and add all audio frames up to and including the pts of this video frame.
-    for ( ;; ) {
-        int64_t audio_pts;
-        vector<float> audio;
-        {
-             unique_lock<mutex> lock(frame_queue_mutex);
-             frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
-             if (storage_thread_should_quit && pending_audio_frames.empty()) return;
-             auto it = pending_audio_frames.begin();
-             if (it->first > task.pts) break;
-             audio_pts = it->first;
-             audio = move(it->second);
-             pending_audio_frames.erase(it); 
-        }
+       {
+               // Add video.
+               AVPacket pkt;
+               memset(&pkt, 0, sizeof(pkt));
+               pkt.buf = nullptr;
+               pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+               pkt.size = data.size();
+               pkt.stream_index = 0;
+               if (task.frame_type == FRAME_IDR) {
+                       pkt.flags = AV_PKT_FLAG_KEY;
+               } else {
+                       pkt.flags = 0;
+               }
+               //pkt.duration = 1;
+               httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay(),
+                               global_flags.uncompressed_video_to_http ? HTTPD::DESTINATION_FILE_ONLY : HTTPD::DESTINATION_FILE_AND_HTTP);
+       }
+       // Encode and add all audio frames up to and including the pts of this video frame.
+       for ( ;; ) {
+               int64_t audio_pts;
+               vector<float> audio;
+               {
+                       unique_lock<mutex> lock(frame_queue_mutex);
+                       frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+                       if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+                       auto it = pending_audio_frames.begin();
+                       if (it->first > task.pts) break;
+                       audio_pts = it->first;
+                       audio = move(it->second);
+                       pending_audio_frames.erase(it); 
+               }
 
-        AVFrame *frame = avcodec_alloc_frame();
-        frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_S32;
-        frame->channel_layout = AV_CH_LAYOUT_STEREO;
+               encode_audio(audio, audio_pts, context_audio, HTTPD::DESTINATION_FILE_AND_HTTP);
 
-        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
-        int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
-        if (ret < 0) {
-            fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
-            exit(1);
-        }
-        for (int i = 0; i < frame->nb_samples * 2; ++i) {
-            if (audio[i] >= 1.0f) {
-                int_samples[i] = 2147483647;
-            } else if (audio[i] <= -1.0f) {
-                int_samples[i] = -2147483647;
-            } else {
-                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
-            }
-        }
+               if (audio_pts == task.pts) break;
+       }
+}
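
The new destination argument is where the commit message comes in: each packet can now be routed to the file mux, to HTTP clients, or to both. A hypothetical sketch of that routing (the actual Mux/HTTPD split lives in httpd.h; the mux member names below are assumptions):

    void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts,
                    HTTPD::PacketDestination destination) {
        if (destination != HTTPD::DESTINATION_HTTP_ONLY) {
            file_mux->add_packet(pkt, pts, dts);  // local recording
        }
        if (destination != HTTPD::DESTINATION_FILE_ONLY) {
            http_mux->add_packet(pkt, pts, dts);  // streaming clients
        }
    }
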
 
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = nullptr;
-        pkt.size = 0;
-        int got_output;
-        avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
-        if (got_output) {
-            pkt.stream_index = 1;
-            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
-        }
-        // TODO: Delayed frames.
-        avcodec_free_frame(&frame);
-        av_free_packet(&pkt);
-        if (audio_pts == task.pts) break;
-    }
+void H264EncoderImpl::encode_audio(
+       const vector<float> &audio,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       HTTPD::PacketDestination destination)
+{
+       audio_frame->nb_samples = audio.size() / 2;
+       audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+
+       unique_ptr<float[]> planar_samples;
+       unique_ptr<int32_t[]> int_samples;
+
+       if (ctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
+               audio_frame->format = AV_SAMPLE_FMT_FLTP;
+               planar_samples.reset(new float[audio.size()]);
+               avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
+               for (int i = 0; i < audio_frame->nb_samples; ++i) {
+                       planar_samples[i] = audio[i * 2 + 0];
+                       planar_samples[i + audio_frame->nb_samples] = audio[i * 2 + 1];
+               }
+       } else {
+               assert(ctx->sample_fmt == AV_SAMPLE_FMT_S32);
+               int_samples.reset(new int32_t[audio.size()]);
+               int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+               if (ret < 0) {
+                       fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+                       exit(1);
+               }
+               for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
+                       if (audio[i] >= 1.0f) {
+                               int_samples[i] = 2147483647;
+                       } else if (audio[i] <= -1.0f) {
+                               int_samples[i] = -2147483647;
+                       } else {
+                               int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+                       }
+               }
+       }
 
-#if 0
-    printf("\r      "); /* return back to startpoint */
-    switch (encode_order % 4) {
-        case 0:
-            printf("|");
-            break;
-        case 1:
-            printf("/");
-            break;
-        case 2:
-            printf("-");
-            break;
-        case 3:
-            printf("\\");
-            break;
-    }
-    printf("%08lld", encode_order);
-#endif
+       AVPacket pkt;
+       av_init_packet(&pkt);
+       pkt.data = nullptr;
+       pkt.size = 0;
+       int got_output = 0;
+       avcodec_encode_audio2(context_audio, &pkt, audio_frame, &got_output);
+       if (got_output) {
+               pkt.stream_index = 1;
+               pkt.flags = AV_PKT_FLAG_KEY;
+               httpd->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay(), destination);
+       }
+       // TODO: Delayed frames.
+       av_frame_unref(audio_frame);
+       av_free_packet(&pkt);
 }
 
-
 // Put a new storage task (an encoded frame to be fetched from the coded buffer and muxed) onto the queue.
 void H264EncoderImpl::storage_task_enqueue(storage_task task)
 {
        unique_lock<mutex> lock(storage_task_queue_mutex);
        storage_task_queue.push(move(task));
-       srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
        storage_task_queue_changed.notify_all();
 }
 
@@ -1666,23 +1768,54 @@ int H264EncoderImpl::deinit_va()
     return 0;
 }
 
+namespace {
 
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
-       : current_storage_frame(0), surface(surface), httpd(httpd)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx)
 {
-       AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
-       context_audio = avcodec_alloc_context3(codec_audio);
-       context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+       AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+       if (codec_audio == nullptr) {
+               fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+               exit(1);
+       }
+
+       AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+       context_audio->bit_rate = bit_rate;
        context_audio->sample_rate = OUTPUT_FREQUENCY;
-       context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+
+       // Choose sample format; we currently only support these two
+       // (see encode_audio), so we're a bit picky.
+       const AVSampleFormat *ptr = codec_audio->sample_fmts;
+       for ( ; *ptr != -1; ++ptr) {
+               if (*ptr == AV_SAMPLE_FMT_FLTP || *ptr == AV_SAMPLE_FMT_S32) {
+                       context_audio->sample_fmt = *ptr;
+                       break;
+               }
+       }
+       if (*ptr == -1) {
+               fprintf(stderr, "ERROR: Audio codec does not support fltp or s32 sample formats\n");
+               exit(1);
+       }
+
        context_audio->channels = 2;
        context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
        context_audio->time_base = AVRational{1, TIMEBASE};
        if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
-               fprintf(stderr, "Could not open codec\n");
+               fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
                exit(1);
        }
 
+       *ctx = context_audio;
+}
+
+}  // namespace
+
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+       : current_storage_frame(0), surface(surface), httpd(httpd)
+{
+       init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, AUDIO_OUTPUT_BIT_RATE, &context_audio);
+
+       audio_frame = av_frame_alloc();
+
        frame_width = width;
        frame_height = height;
        frame_width_mbaligned = (frame_width + 15) & (~15);
@@ -1690,6 +1823,10 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, in
 
        //print_input();
 
+       if (global_flags.uncompressed_video_to_http) {
+               reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+       }
+
        init_va(va_display);
        setup_encode();
 
@@ -1718,6 +1855,9 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, in
 H264EncoderImpl::~H264EncoderImpl()
 {
        shutdown();
+       av_frame_free(&audio_frame);
+
+       // TODO: Destroy context.
 }
 
 bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
@@ -1726,7 +1866,12 @@ bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        {
                // Wait until this frame slot is done encoding.
                unique_lock<mutex> lock(storage_task_queue_mutex);
+               if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+                       fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+                               current_storage_frame % SURFACE_NUM, current_storage_frame);
+               }
                storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+               srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
                if (storage_thread_should_quit) return false;
        }
 
@@ -1796,7 +1941,7 @@ void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
        frame_queue_nonempty.notify_all();
 }
 
-void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264EncoderImpl::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
        assert(!is_shutdown);
 
@@ -1824,18 +1969,22 @@ void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vecto
                glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
                check_error();
 
-               glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
-               check_error();
-               fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+               glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
                check_error();
        }
 
+       RefCountedGLsync fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+       check_error();
+       glFlush();  // Make the H.264 thread see the fence as soon as possible.
+       check_error();
+
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
                ++current_storage_frame;
        }
        frame_queue_nonempty.notify_all();
+       return fence;
 }
 
 void H264EncoderImpl::shutdown()
@@ -1927,6 +2076,26 @@ void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int g
                encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
                last_dts = dts;
        }
+
+       if (global_flags.uncompressed_video_to_http) {
+               // Add frames left in reorderer.
+               while (!reorderer->empty()) {
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+                       add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+               }
+       }
+}
+
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
+       AVPacket pkt;
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.buf = nullptr;
+       pkt.data = const_cast<uint8_t *>(data);
+       pkt.size = frame_width * frame_height * 2;
+       pkt.stream_index = 0;
+       pkt.flags = AV_PKT_FLAG_KEY;
+       httpd->add_packet(pkt, pts, pts, HTTPD::DESTINATION_HTTP_ONLY);
 }
 
 namespace {
@@ -1950,7 +2119,12 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int enco
                                    int frame_type, int64_t pts, int64_t dts)
 {
        // Wait for the GPU to be done with the frame.
-       glClientWaitSync(frame.fence.get(), 0, 0);
+       GLenum sync_status;
+       do {
+               sync_status = glClientWaitSync(frame.fence.get(), 0, 1000000000);
+               check_error();
+       } while (sync_status == GL_TIMEOUT_EXPIRED);
+       assert(sync_status != GL_WAIT_FAILED);
 
        // Release back any input frames we needed to render this frame.
        frame.input_frames.clear();
@@ -1967,14 +2141,23 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int enco
                unsigned char *surface_p = nullptr;
                vaMapBuffer(va_dpy, surf->surface_image.buf, (void **)&surface_p);
 
-               unsigned char *y_ptr = (unsigned char *)surface_p;
-               memcpy_with_pitch(y_ptr, surf->y_ptr, frame_width, surf->surface_image.pitches[0], frame_height);
+               unsigned char *va_y_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[0];
+               memcpy_with_pitch(va_y_ptr, surf->y_ptr, frame_width, surf->surface_image.pitches[0], frame_height);
 
-               unsigned char *cbcr_ptr = (unsigned char *)surface_p + frame_width * frame_height;
-               memcpy_with_pitch(cbcr_ptr, surf->cbcr_ptr, (frame_width / 2) * sizeof(uint16_t), surf->surface_image.pitches[1], frame_height / 2);
+               unsigned char *va_cbcr_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[1];
+               memcpy_with_pitch(va_cbcr_ptr, surf->cbcr_ptr, (frame_width / 2) * sizeof(uint16_t), surf->surface_image.pitches[1], frame_height / 2);
 
                va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+               if (global_flags.uncompressed_video_to_http) {
+                       // Add uncompressed video. (Note that pts == dts here.)
+                       // Delay needs to match audio.
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<uint8_t *>(surf->y_ptr));
+                       if (output_frame.second != nullptr) {
+                               add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                       }
+               }
        }
 
        va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
@@ -2030,9 +2213,9 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        return impl->begin_frame(y_tex, cbcr_tex);
 }
 
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264Encoder::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
-       impl->end_frame(fence, pts, input_frames);
+       return impl->end_frame(pts, input_frames);
 }
 
 void H264Encoder::shutdown()