]> git.sesse.net Git - nageru/blobdiff - quicksync_encoder.cpp
Move find_received_timestamp() into print_latency.h, so that multiple consumers can...
[nageru] / quicksync_encoder.cpp
index 55a0eabb5e3a5d191cbe90e8a889ac118ac10b6a..749eb63d7056e638f8c8f4ac1d640706532b3d3e 100644 (file)
@@ -1,4 +1,3 @@
-//#include "sysdeps.h"
 #include "quicksync_encoder.h"
 
 #include <movit/resource_pool.h>  // Must be above the Xlib includes.
@@ -9,10 +8,6 @@
 #include <assert.h>
 #include <epoxy/egl.h>
 #include <fcntl.h>
-#include <libavcodec/avcodec.h>
-#include <libavformat/avio.h>
-#include <libavutil/error.h>
-#include <libdrm/drm_fourcc.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <va/va_enc_h264.h>
 #include <va/va_x11.h>
 #include <algorithm>
+#include <chrono>
 #include <condition_variable>
 #include <cstddef>
 #include <cstdint>
+#include <functional>
 #include <map>
 #include <memory>
 #include <mutex>
 #include <thread>
 #include <utility>
 
+extern "C" {
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avio.h>
+#include <libavutil/error.h>
+#include <libdrm/drm_fourcc.h>
+
+}  // extern "C"
+
 #include "audio_encoder.h"
 #include "context.h"
 #include "defs.h"
+#include "disk_space_estimator.h"
 #include "ffmpeg_raii.h"
 #include "flags.h"
 #include "mux.h"
+#include "print_latency.h"
+#include "quicksync_encoder_impl.h"
 #include "ref_counted_frame.h"
 #include "timebase.h"
 #include "x264_encoder.h"
 
 using namespace std;
+using namespace std::chrono;
+using namespace std::placeholders;
 
 class QOpenGLContext;
 class QSurface;
@@ -87,9 +98,6 @@ class QSurface;
 #define PROFILE_IDC_HIGH        100
    
 #define BITSTREAM_ALLOCATE_STEPPING     4096
-#define SURFACE_NUM 16 /* 16 surfaces for source YUV */
-#define MAX_NUM_REF1 16 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
-#define MAX_NUM_REF2 32 // Seemingly a hardware-fixed value, not related to SURFACE_NUM
 
 static constexpr unsigned int MaxFrameNum = (2<<16);
 static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
@@ -108,251 +116,8 @@ static constexpr int rc_default_modes[] = {  // Priority list of modes.
 #define SRC_SURFACE_FREE        0
 #define SRC_SURFACE_IN_ENCODING 1
     
-struct __bitstream {
-    unsigned int *buffer;
-    int bit_offset;
-    int max_size_in_dword;
-};
-typedef struct __bitstream bitstream;
-
 using namespace std;
 
-// H.264 video comes out in encoding order (e.g. with two B-frames:
-// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
-// come in the right order. Since we do everything, including waiting
-// for the frames to come out of OpenGL, in encoding order, we need
-// a reordering buffer for uncompressed frames so that they come out
-// correctly. We go the super-lazy way of not making it understand
-// anything about the true order (which introduces some extra latency,
-// though); we know that for N B-frames we need at most (N-1) frames
-// in the reorder buffer, and can just sort on that.
-//
-// The class also deals with keeping a freelist as needed.
-class FrameReorderer {
-public:
-       FrameReorderer(unsigned queue_length, int width, int height);
-
-       struct Frame {
-               int64_t pts, duration;
-               uint8_t *data;
-
-               // Invert to get the smallest pts first.
-               bool operator< (const Frame &other) const { return pts > other.pts; }
-       };
-
-       // Returns the next frame to insert with its pts, if any. Otherwise -1 and nullptr.
-       // Does _not_ take ownership of data; a copy is taken if needed.
-       // The returned pointer is valid until the next call to reorder_frame, or destruction.
-       // As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
-       Frame reorder_frame(int64_t pts, int64_t duration, uint8_t *data);
-
-       // The same as reorder_frame, but without inserting anything. Used to empty the queue.
-       Frame get_first_frame();
-
-       bool empty() const { return frames.empty(); }
-
-private:
-       unsigned queue_length;
-       int width, height;
-
-       priority_queue<Frame> frames;
-       stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
-
-       // Owns all the pointers. Normally, freelist and frames could do this themselves,
-       // except priority_queue doesn't work well with movable-only types.
-       vector<unique_ptr<uint8_t[]>> owner;
-};
-
-FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
-    : queue_length(queue_length), width(width), height(height)
-{
-       for (unsigned i = 0; i < queue_length; ++i) {
-               owner.emplace_back(new uint8_t[width * height * 2]);
-               freelist.push(owner.back().get());
-       }
-}
-
-FrameReorderer::Frame FrameReorderer::reorder_frame(int64_t pts, int64_t duration, uint8_t *data)
-{
-       if (queue_length == 0) {
-               return Frame{pts, duration, data};
-       }
-
-       assert(!freelist.empty());
-       uint8_t *storage = freelist.top();
-       freelist.pop();
-       memcpy(storage, data, width * height * 2);
-       frames.push(Frame{pts, duration, storage});
-
-       if (frames.size() >= queue_length) {
-               return get_first_frame();
-       } else {
-               return Frame{-1, -1, nullptr};
-       }
-}
-
-FrameReorderer::Frame FrameReorderer::get_first_frame()
-{
-       assert(!frames.empty());
-       Frame storage = frames.top();
-       frames.pop();
-       freelist.push(storage.data);
-       return storage;
-}
-
-class QuickSyncEncoderImpl {
-public:
-       QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder);
-       ~QuickSyncEncoderImpl();
-       void add_audio(int64_t pts, vector<float> audio);
-       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       RefCountedGLsync end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames);
-       void shutdown();
-       void release_gl_resources();
-       void set_stream_mux(Mux *mux)
-       {
-               stream_mux = mux;
-       }
-
-       // So we never get negative dts.
-       int64_t global_delay() const {
-               return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
-       }
-
-private:
-       struct storage_task {
-               unsigned long long display_order;
-               int frame_type;
-               vector<float> audio;
-               int64_t pts, dts, duration;
-       };
-       struct PendingFrame {
-               RefCountedGLsync fence;
-               vector<RefCountedFrame> input_frames;
-               int64_t pts, duration;
-       };
-
-       void open_output_file(const std::string &filename);
-       void encode_thread_func();
-       void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
-       void add_packet_for_uncompressed_frame(int64_t pts, int64_t duration, const uint8_t *data);
-       void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
-                         int frame_type, int64_t pts, int64_t dts, int64_t duration);
-       void storage_task_thread();
-       void storage_task_enqueue(storage_task task);
-       void save_codeddata(storage_task task);
-       int render_packedsequence();
-       int render_packedpicture();
-       void render_packedslice();
-       int render_sequence();
-       int render_picture(int frame_type, int display_frame_num, int gop_start_display_frame_num);
-       void sps_rbsp(bitstream *bs);
-       void pps_rbsp(bitstream *bs);
-       int build_packed_pic_buffer(unsigned char **header_buffer);
-       int render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type);
-       void slice_header(bitstream *bs);
-       int build_packed_seq_buffer(unsigned char **header_buffer);
-       int build_packed_slice_buffer(unsigned char **header_buffer);
-       int init_va(const string &va_display);
-       int deinit_va();
-       void enable_zerocopy_if_possible();
-       VADisplay va_open_display(const string &va_display);
-       void va_close_display(VADisplay va_dpy);
-       int setup_encode();
-       void release_encode();
-       void update_ReferenceFrames(int frame_type);
-       int update_RefPicList(int frame_type);
-
-       bool is_shutdown = false;
-       bool has_released_gl_resources = false;
-       bool use_zerocopy;
-       int drm_fd = -1;
-
-       thread encode_thread, storage_thread;
-
-       mutex storage_task_queue_mutex;
-       condition_variable storage_task_queue_changed;
-       int srcsurface_status[SURFACE_NUM];  // protected by storage_task_queue_mutex
-       queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
-       bool storage_thread_should_quit = false;  // protected by storage_task_queue_mutex
-
-       mutex frame_queue_mutex;
-       condition_variable frame_queue_nonempty;
-       bool encode_thread_should_quit = false;  // under frame_queue_mutex
-
-       int current_storage_frame;
-
-       map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
-       movit::ResourcePool *resource_pool;
-       QSurface *surface;
-
-       unique_ptr<AudioEncoder> file_audio_encoder;
-
-       unique_ptr<FrameReorderer> reorderer;
-       X264Encoder *x264_encoder;  // nullptr if not using x264.
-
-       Mux* stream_mux = nullptr;  // To HTTP.
-       unique_ptr<Mux> file_mux;  // To local disk.
-
-       Display *x11_display = nullptr;
-
-       // Encoder parameters
-       VADisplay va_dpy;
-       VAProfile h264_profile = (VAProfile)~0;
-       VAConfigAttrib config_attrib[VAConfigAttribTypeMax];
-       int config_attrib_num = 0, enc_packed_header_idx;
-
-       struct GLSurface {
-               VASurfaceID src_surface, ref_surface;
-               VABufferID coded_buf;
-
-               VAImage surface_image;
-               GLuint y_tex, cbcr_tex;
-
-               // Only if use_zerocopy == true.
-               EGLImage y_egl_image, cbcr_egl_image;
-
-               // Only if use_zerocopy == false.
-               GLuint pbo;
-               uint8_t *y_ptr, *cbcr_ptr;
-               size_t y_offset, cbcr_offset;
-       };
-       GLSurface gl_surfaces[SURFACE_NUM];
-
-       VAConfigID config_id;
-       VAContextID context_id;
-       VAEncSequenceParameterBufferH264 seq_param;
-       VAEncPictureParameterBufferH264 pic_param;
-       VAEncSliceParameterBufferH264 slice_param;
-       VAPictureH264 CurrentCurrPic;
-       VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
-
-       // Static quality settings.
-       static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
-       static constexpr unsigned int num_ref_frames = 2;
-       static constexpr int initial_qp = 15;
-       static constexpr int minimal_qp = 0;
-       static constexpr int intra_period = 30;
-       static constexpr int intra_idr_period = MAX_FPS;  // About a second; more at lower frame rates. Not ideal.
-
-       // Quality settings that are meant to be static, but might be overridden
-       // by the profile.
-       int constraint_set_flag = 0;
-       int h264_packedheader = 0; /* support pack header? */
-       int h264_maxref = (1<<16|1);
-       int h264_entropy_mode = 1; /* cabac */
-       int ip_period = 3;
-
-       int rc_mode = -1;
-       unsigned int current_frame_num = 0;
-       unsigned int numShortTerm = 0;
-
-       int frame_width;
-       int frame_height;
-       int frame_width_mbaligned;
-       int frame_height_mbaligned;
-};
-
 // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
 // but if we don't delete it here, we get leaks. The GStreamer implementation
 // does the same.
@@ -1621,6 +1386,10 @@ void QuickSyncEncoderImpl::save_codeddata(storage_task task)
        }
        vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
+       static int frameno = 0;
+       print_latency("Current QuickSync latency (video inputs → disk mux):",
+               task.received_ts, (task.frame_type == FRAME_B), &frameno);
+
        {
                // Add video.
                AVPacket pkt;
@@ -1724,8 +1493,8 @@ int QuickSyncEncoderImpl::deinit_va()
     return 0;
 }
 
-QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder)
-       : current_storage_frame(0), resource_pool(resource_pool), surface(surface), x264_encoder(x264_encoder), frame_width(width), frame_height(height)
+QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+       : current_storage_frame(0), resource_pool(resource_pool), surface(surface), x264_encoder(x264_encoder), frame_width(width), frame_height(height), disk_space_estimator(disk_space_estimator)
 {
        file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, oformat));
        open_output_file(filename);
@@ -1736,10 +1505,6 @@ QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::R
 
        //print_input();
 
-       if (global_flags.uncompressed_video_to_http ||
-           global_flags.x264_video_to_http) {
-               reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
-       }
        if (global_flags.x264_video_to_http) {
                assert(x264_encoder != nullptr);
        } else {
@@ -1894,7 +1659,7 @@ RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration,
 
        {
                unique_lock<mutex> lock(frame_queue_mutex);
-               pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts, duration };
+               pending_video_frames.push(PendingFrame{ fence, input_frames, pts, duration });
                ++current_storage_frame;
        }
        frame_queue_nonempty.notify_all();
@@ -1947,65 +1712,85 @@ void QuickSyncEncoderImpl::open_output_file(const std::string &filename)
 
        string video_extradata = "";  // FIXME: See other comment about global headers.
        AVCodecParametersWithDeleter audio_codecpar = file_audio_encoder->get_codec_parameters();
-       file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), TIMEBASE));
+       file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), TIMEBASE,
+               std::bind(&DiskSpaceEstimator::report_write, disk_space_estimator, filename, _1)));
 }
 
 void QuickSyncEncoderImpl::encode_thread_func()
 {
        int64_t last_dts = -1;
        int gop_start_display_frame_num = 0;
-       for (int encoding_frame_num = 0; ; ++encoding_frame_num) {
+       for (int display_frame_num = 0; ; ++display_frame_num) {
+               // Wait for the frame to be in the queue. Note that this only means
+               // we started rendering it.
                PendingFrame frame;
-               int pts_lag;
-               int frame_type, display_frame_num;
-               encoding2display_order(encoding_frame_num, intra_period, intra_idr_period, ip_period,
-                                      &display_frame_num, &frame_type, &pts_lag);
-               if (frame_type == FRAME_IDR) {
-                       numShortTerm = 0;
-                       current_frame_num = 0;
-                       gop_start_display_frame_num = display_frame_num;
-               }
-
                {
                        unique_lock<mutex> lock(frame_queue_mutex);
-                       frame_queue_nonempty.wait(lock, [this, display_frame_num]{
-                               return encode_thread_should_quit || pending_video_frames.count(display_frame_num) != 0;
+                       frame_queue_nonempty.wait(lock, [this]{
+                               return encode_thread_should_quit || !pending_video_frames.empty();
                        });
-                       if (encode_thread_should_quit && pending_video_frames.count(display_frame_num) == 0) {
-                               // We have queued frames that were supposed to be B-frames,
-                               // but will be no P-frame to encode them against. Encode them all
-                               // as P-frames instead. Note that this happens under the mutex,
+                       if (encode_thread_should_quit && pending_video_frames.empty()) {
+                               // We may have queued frames left in the reorder buffer
+                               // that were supposed to be B-frames, but have no P-frame
+                               // to be encoded against. If so, encode them all as
+                               // P-frames instead. Note that this happens under the mutex,
                                // but nobody else uses it at this point, since we're shutting down,
                                // so there's no contention.
-                               encode_remaining_frames_as_p(encoding_frame_num, gop_start_display_frame_num, last_dts);
+                               encode_remaining_frames_as_p(quicksync_encoding_frame_num, gop_start_display_frame_num, last_dts);
                                return;
                        } else {
-                               frame = move(pending_video_frames[display_frame_num]);
-                               pending_video_frames.erase(display_frame_num);
+                               frame = move(pending_video_frames.front());
+                               pending_video_frames.pop();
                        }
                }
 
-               // Determine the dts of this frame.
-               int64_t dts;
-               if (pts_lag == -1) {
-                       assert(last_dts != -1);
-                       dts = last_dts + (TIMEBASE / MAX_FPS);
-               } else {
-                       dts = frame.pts - pts_lag;
-               }
-               last_dts = dts;
+               // Pass the frame on to x264 (or uncompressed to HTTP) as needed.
+               // Note that this implicitly waits for the frame to be done rendering.
+               pass_frame(frame, display_frame_num, frame.pts, frame.duration);
+               reorder_buffer[display_frame_num] = move(frame);
+
+               // Now encode as many QuickSync frames as we can using the frames we have available.
+               // (It could be zero, or it could be multiple.) FIXME: make a function.
+               for ( ;; ) {
+                       int pts_lag;
+                       int frame_type, quicksync_display_frame_num;
+                       encoding2display_order(quicksync_encoding_frame_num, intra_period, intra_idr_period, ip_period,
+                                              &quicksync_display_frame_num, &frame_type, &pts_lag);
+                       if (!reorder_buffer.count(quicksync_display_frame_num)) {
+                               break;
+                       }
+                       frame = move(reorder_buffer[quicksync_display_frame_num]);
+                       reorder_buffer.erase(quicksync_display_frame_num);
+
+                       if (frame_type == FRAME_IDR) {
+                               numShortTerm = 0;
+                               current_frame_num = 0;
+                               gop_start_display_frame_num = quicksync_display_frame_num;
+                       }
+
+                       // Determine the dts of this frame.
+                       int64_t dts;
+                       if (pts_lag == -1) {
+                               assert(last_dts != -1);
+                               dts = last_dts + (TIMEBASE / MAX_FPS);
+                       } else {
+                               dts = frame.pts - pts_lag;
+                       }
+                       last_dts = dts;
 
-               encode_frame(frame, encoding_frame_num, display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts, frame.duration);
+                       encode_frame(frame, quicksync_encoding_frame_num, quicksync_display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts, frame.duration);
+                       ++quicksync_encoding_frame_num;
+               }
        }
 }
 
 void QuickSyncEncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts)
 {
-       if (pending_video_frames.empty()) {
+       if (reorder_buffer.empty()) {
                return;
        }
 
-       for (auto &pending_frame : pending_video_frames) {
+       for (auto &pending_frame : reorder_buffer) {
                int display_frame_num = pending_frame.first;
                assert(display_frame_num > 0);
                PendingFrame frame = move(pending_frame.second);
@@ -2014,20 +1799,6 @@ void QuickSyncEncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num,
                encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts, frame.duration);
                last_dts = dts;
        }
-
-       if (global_flags.uncompressed_video_to_http ||
-           global_flags.x264_video_to_http) {
-               // Add frames left in reorderer.
-               while (!reorderer->empty()) {
-                       FrameReorderer::Frame output_frame = reorderer->get_first_frame();
-                       if (global_flags.uncompressed_video_to_http) {
-                               add_packet_for_uncompressed_frame(output_frame.pts, output_frame.duration, output_frame.data);
-                       } else {
-                               assert(global_flags.x264_video_to_http);
-                               x264_encoder->add_frame(output_frame.pts, output_frame.duration, output_frame.data);
-                       }
-               }
-       }
 }
 
 void QuickSyncEncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, int64_t duration, const uint8_t *data)
@@ -2060,8 +1831,7 @@ void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_
 
 }  // namespace
 
-void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
-                                   int frame_type, int64_t pts, int64_t dts, int64_t duration)
+void QuickSyncEncoderImpl::pass_frame(QuickSyncEncoderImpl::PendingFrame frame, int display_frame_num, int64_t pts, int64_t duration)
 {
        // Wait for the GPU to be done with the frame.
        GLenum sync_status;
@@ -2071,9 +1841,28 @@ void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame
        } while (sync_status == GL_TIMEOUT_EXPIRED);
        assert(sync_status != GL_WAIT_FAILED);
 
+       ReceivedTimestamps received_ts = find_received_timestamp(frame.input_frames);
+       static int frameno = 0;
+       print_latency("Current mixer latency (video inputs → ready for encode):",
+               received_ts, false, &frameno);
+
        // Release back any input frames we needed to render this frame.
        frame.input_frames.clear();
 
+       GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
+       uint8_t *data = reinterpret_cast<uint8_t *>(surf->y_ptr);
+       if (global_flags.uncompressed_video_to_http) {
+               add_packet_for_uncompressed_frame(pts, duration, data);
+       } else if (global_flags.x264_video_to_http) {
+               x264_encoder->add_frame(pts, duration, data, received_ts);
+       }
+}
+
+void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
+                                        int frame_type, int64_t pts, int64_t dts, int64_t duration)
+{
+       const ReceivedTimestamps received_ts = find_received_timestamp(frame.input_frames);
+
        GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
        VAStatus va_status;
 
@@ -2083,6 +1872,7 @@ void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame
                va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
                CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
        } else {
+               // Upload the frame to VA-API.
                unsigned char *surface_p = nullptr;
                vaMapBuffer(va_dpy, surf->surface_image.buf, (void **)&surface_p);
 
@@ -2094,21 +1884,6 @@ void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame
 
                va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
-
-               if (global_flags.uncompressed_video_to_http ||
-                   global_flags.x264_video_to_http) {
-                       // Add uncompressed video. (Note that pts == dts here.)
-                       // Delay needs to match audio.
-                       FrameReorderer::Frame output_frame = reorderer->reorder_frame(pts + global_delay(), duration, reinterpret_cast<uint8_t *>(surf->y_ptr));
-                       if (output_frame.data != nullptr) {
-                               if (global_flags.uncompressed_video_to_http) {
-                                       add_packet_for_uncompressed_frame(output_frame.pts, output_frame.duration, output_frame.data);
-                               } else {
-                                       assert(global_flags.x264_video_to_http);
-                                       x264_encoder->add_frame(output_frame.pts, output_frame.duration, output_frame.data);
-                               }
-                       }
-               }
        }
 
        va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
@@ -2146,14 +1921,15 @@ void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame
        tmp.pts = pts;
        tmp.dts = dts;
        tmp.duration = duration;
+       tmp.received_ts = received_ts;
        storage_task_enqueue(move(tmp));
 
        update_ReferenceFrames(frame_type);
 }
 
 // Proxy object.
-QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder)
-       : impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder)) {}
+QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+       : impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder, disk_space_estimator)) {}
 
 // Must be defined here because unique_ptr<> destructor needs to know the impl.
 QuickSyncEncoder::~QuickSyncEncoder() {}