Separate muxing entirely out of the HTTPD class.
diff --git a/h264encode.cpp b/h264encode.cpp
index 72fca67ea963eab55c493e4c159fd48e8198f908..b9c52eb55e00914406fc73b131eb47657f13336f 100644
 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
+#include <libavresample/avresample.h>
 #include <libavutil/channel_layout.h>
 #include <libavutil/frame.h>
 #include <libavutil/rational.h>
 #include <libavutil/samplefmt.h>
+#include <libavutil/opt.h>
 }
 #include <libdrm/drm_fourcc.h>
 #include <stdio.h>
@@ -40,7 +42,9 @@ extern "C" {
 #include "defs.h"
 #include "flags.h"
 #include "httpd.h"
+#include "mux.h"
 #include "timebase.h"
+#include "x264encode.h"
 
 using namespace std;
 
@@ -190,7 +194,7 @@ pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
        return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
 }
 
-class H264EncoderImpl {
+class H264EncoderImpl : public KeyFrameSignalReceiver {
 public:
        H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
        ~H264EncoderImpl();
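
The `get_first_frame()` context above shows the reorderer's sign trick: frames go in keyed on negated pts, so a max-heap hands back the frame with the smallest pts first, and the pts is negated again on the way out. A minimal sketch of that pattern, assuming the reorderer keeps its frames in a `std::priority_queue` (the actual member is declared earlier in this file, outside the diff):

#include <cstdint>
#include <queue>
#include <utility>

// Sketch only: std::priority_queue is a max-heap, so storing (-pts, data)
// makes top() yield the frame with the earliest pts.
std::priority_queue<std::pair<int64_t, const uint8_t *>> frames;

void put_frame(int64_t pts, const uint8_t *data)
{
	frames.push(std::make_pair(-pts, data));
}

std::pair<int64_t, const uint8_t *> get_first_frame_sketch()
{
	auto storage = frames.top();
	frames.pop();
	return std::make_pair(-storage.first, storage.second);  // Re-invert pts.
}
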
@@ -198,6 +202,12 @@ public:
        bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
        RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
        void shutdown();
+       void open_output_file(const std::string &filename);
+       void close_output_file();
+
+       virtual void signal_keyframe() override {
+               stream_mux_writing_keyframes = true;
+       }
 
 private:
        struct storage_task {
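
`signal_keyframe()` above simply latches a flag that the new stream write callback (added further down) consumes. The `KeyFrameSignalReceiver` base class itself is declared in mux.h, which is not part of this diff; a plausible sketch of what this code relies on, hedged accordingly:

// Sketch only -- the real declaration lives in mux.h. The Mux is assumed to
// call signal_keyframe() just before it writes out a keyframe packet, so
// the receiver can tag the next chunk it hands to the HTTP layer.
class KeyFrameSignalReceiver {
public:
	virtual ~KeyFrameSignalReceiver() {}
	virtual void signal_keyframe() = 0;
};
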
@@ -212,12 +222,29 @@ private:
                int64_t pts;
        };
 
+       // So we never get negative dts.
+       int64_t global_delay() const {
+               return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+       }
+
        void encode_thread_func();
        void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
        void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
        void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
                          int frame_type, int64_t pts, int64_t dts);
        void storage_task_thread();
+       void encode_audio(const vector<float> &audio,
+                         vector<float> *audio_queue,
+                         int64_t audio_pts,
+                         AVCodecContext *ctx,
+                         AVAudioResampleContext *resampler,
+                         const vector<Mux *> &muxes);
+       void encode_audio_one_frame(const float *audio,
+                                   size_t num_samples,  // In each channel.
+                                   int64_t audio_pts,
+                                   AVCodecContext *ctx,
+                                   AVAudioResampleContext *resampler,
+                                   const vector<Mux *> &muxes);
        void storage_task_enqueue(storage_task task);
        void save_codeddata(storage_task task);
        int render_packedsequence();
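
The new `global_delay()` helper centralizes a constant the old code computed inline (see the save_codeddata hunk below). The reasoning: with B-frames, decode order runs ahead of display order by up to `ip_period - 1` frame durations, so the first packets' dts would be negative unless both timestamps are shifted by that amount. A worked check with illustrative numbers (the real `TIMEBASE` and `MAX_FPS` are defined in defs.h, not in this diff):

#include <cstdint>

// Illustrative values only, chosen for the arithmetic below.
constexpr int64_t TIMEBASE = 1200000;
constexpr int MAX_FPS = 60;
constexpr int ip_period = 3;  // e.g. two B-frames between each I/P frame

// One frame lasts 1200000 / 60 = 20000 timebase units, so the shift is
// (3 - 1) * 20000 = 40000: the two frame durations by which the first
// P-frame's dts would otherwise dip below zero.
constexpr int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
static_assert(global_delay == 40000, "two frame durations at 60 fps");
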
@@ -241,6 +268,10 @@ private:
        int release_encode();
        void update_ReferenceFrames(int frame_type);
        int update_RefPicList(int frame_type);
+       void open_output_stream();
+       void close_output_stream();
+       static int write_packet_thunk(void *opaque, uint8_t *buf, int buf_size);
+       int write_packet(uint8_t *buf, int buf_size);
 
        bool is_shutdown = false;
        bool use_zerocopy;
@@ -264,10 +295,19 @@ private:
        map<int64_t, vector<float>> pending_audio_frames;  // under frame_queue_mutex
        QSurface *surface;
 
-       AVCodecContext *context_audio;
+       AVCodecContext *context_audio_file;
+       AVCodecContext *context_audio_stream = nullptr;  // nullptr = don't code separate audio for stream.
+
+       AVAudioResampleContext *resampler_audio_file = nullptr;
+       AVAudioResampleContext *resampler_audio_stream = nullptr;
+
+       vector<float> audio_queue_file;
+       vector<float> audio_queue_stream;
+
        AVFrame *audio_frame = nullptr;
        HTTPD *httpd;
        unique_ptr<FrameReorderer> reorderer;
+       unique_ptr<X264Encoder> x264_encoder;  // nullptr if not using x264.
 
        Display *x11_display = nullptr;
 
@@ -326,6 +366,16 @@ private:
        int frame_height;
        int frame_width_mbaligned;
        int frame_height_mbaligned;
+
+       unique_ptr<Mux> stream_mux;  // To HTTP.
+       unique_ptr<Mux> file_mux;  // To local disk.
+
+       // While Mux object is constructing, <stream_mux_writing_header> is true,
+       // and the header is being collected into stream_mux_header.
+       bool stream_mux_writing_header;
+       string stream_mux_header;
+
+       bool stream_mux_writing_keyframes = false;
 };
 
 // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
@@ -932,6 +982,9 @@ void H264EncoderImpl::enable_zerocopy_if_possible()
        if (global_flags.uncompressed_video_to_http) {
                fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
                use_zerocopy = false;
+       } else if (global_flags.x264_video_to_http) {
+               fprintf(stderr, "Disabling zerocopy H.264 encoding due to --x264_video_to_http.\n");
+               use_zerocopy = false;
        } else {
                use_zerocopy = true;
        }
@@ -1585,91 +1638,145 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
 
 void H264EncoderImpl::save_codeddata(storage_task task)
 {    
-    VACodedBufferSegment *buf_list = NULL;
-    VAStatus va_status;
+       VACodedBufferSegment *buf_list = NULL;
+       VAStatus va_status;
 
-    string data;
+       string data;
 
-    const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
+       va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+       CHECK_VASTATUS(va_status, "vaMapBuffer");
+       while (buf_list != NULL) {
+               data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+               buf_list = (VACodedBufferSegment *) buf_list->next;
+       }
+       vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
 
-    va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
-    CHECK_VASTATUS(va_status, "vaMapBuffer");
-    while (buf_list != NULL) {
-        data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
-        buf_list = (VACodedBufferSegment *) buf_list->next;
-    }
-    vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
-
-    {
-        // Add video.
-        AVPacket pkt;
-        memset(&pkt, 0, sizeof(pkt));
-        pkt.buf = nullptr;
-        pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
-        pkt.size = data.size();
-        pkt.stream_index = 0;
-        if (task.frame_type == FRAME_IDR) {
-            pkt.flags = AV_PKT_FLAG_KEY;
-        } else {
-            pkt.flags = 0;
-        }
-        //pkt.duration = 1;
-        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay,
-               global_flags.uncompressed_video_to_http ? HTTPD::DESTINATION_FILE_ONLY : HTTPD::DESTINATION_FILE_AND_HTTP);
-    }
-    // Encode and add all audio frames up to and including the pts of this video frame.
-    for ( ;; ) {
-        int64_t audio_pts;
-        vector<float> audio;
-        {
-             unique_lock<mutex> lock(frame_queue_mutex);
-             frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
-             if (storage_thread_should_quit && pending_audio_frames.empty()) return;
-             auto it = pending_audio_frames.begin();
-             if (it->first > task.pts) break;
-             audio_pts = it->first;
-             audio = move(it->second);
-             pending_audio_frames.erase(it); 
-        }
+       {
+               // Add video.
+               AVPacket pkt;
+               memset(&pkt, 0, sizeof(pkt));
+               pkt.buf = nullptr;
+               pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+               pkt.size = data.size();
+               pkt.stream_index = 0;
+               if (task.frame_type == FRAME_IDR) {
+                       pkt.flags = AV_PKT_FLAG_KEY;
+               } else {
+                       pkt.flags = 0;
+               }
+               //pkt.duration = 1;
+               if (file_mux) {
+                       file_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
+               if (!global_flags.uncompressed_video_to_http &&
+                   !global_flags.x264_video_to_http) {
+                       stream_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
+       }
+       // Encode and add all audio frames up to and including the pts of this video frame.
+       for ( ;; ) {
+               int64_t audio_pts;
+               vector<float> audio;
+               {
+                       unique_lock<mutex> lock(frame_queue_mutex);
+                       frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+                       if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+                       auto it = pending_audio_frames.begin();
+                       if (it->first > task.pts) break;
+                       audio_pts = it->first;
+                       audio = move(it->second);
+                       pending_audio_frames.erase(it); 
+               }
 
-        audio_frame->nb_samples = audio.size() / 2;
-        audio_frame->format = AV_SAMPLE_FMT_S32;
-        audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+               if (context_audio_stream) {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, resampler_audio_file, { file_mux.get() });
+                       encode_audio(audio, &audio_queue_stream, audio_pts, context_audio_stream, resampler_audio_stream, { stream_mux.get() });
+               } else {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, resampler_audio_file, { stream_mux.get(), file_mux.get() });
+               }
 
-        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
-        int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
-        if (ret < 0) {
-            fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
-            exit(1);
-        }
-        for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
-            if (audio[i] >= 1.0f) {
-                int_samples[i] = 2147483647;
-            } else if (audio[i] <= -1.0f) {
-                int_samples[i] = -2147483647;
-            } else {
-                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
-            }
-        }
+               if (audio_pts == task.pts) break;
+       }
+}
 
-        AVPacket pkt;
-        av_init_packet(&pkt);
-        pkt.data = nullptr;
-        pkt.size = 0;
-        int got_output;
-        avcodec_encode_audio2(context_audio, &pkt, audio_frame, &got_output);
-        if (got_output) {
-            pkt.stream_index = 1;
-            pkt.flags = AV_PKT_FLAG_KEY;
-            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay, HTTPD::DESTINATION_FILE_AND_HTTP);
-        }
-        // TODO: Delayed frames.
-        av_frame_unref(audio_frame);
-        av_free_packet(&pkt);
-        if (audio_pts == task.pts) break;
-    }
+void H264EncoderImpl::encode_audio(
+       const vector<float> &audio,
+       vector<float> *audio_queue,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       AVAudioResampleContext *resampler,
+       const vector<Mux *> &muxes)
+{
+       if (ctx->frame_size == 0) {
+               // No queueing needed.
+               assert(audio_queue->empty());
+               assert(audio.size() % 2 == 0);
+               encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx, resampler, muxes);
+               return;
+       }
+
+       int64_t sample_offset = audio_queue->size();
+
+       audio_queue->insert(audio_queue->end(), audio.begin(), audio.end());
+       size_t sample_num;
+       for (sample_num = 0;
+            sample_num + ctx->frame_size * 2 <= audio_queue->size();
+            sample_num += ctx->frame_size * 2) {
+               int64_t adjusted_audio_pts = audio_pts + (int64_t(sample_num) - sample_offset) * TIMEBASE / (OUTPUT_FREQUENCY * 2);
+               encode_audio_one_frame(&(*audio_queue)[sample_num],
+                                      ctx->frame_size,
+                                      adjusted_audio_pts,
+                                      ctx,
+                                      resampler,
+                                      muxes);
+       }
+       audio_queue->erase(audio_queue->begin(), audio_queue->begin() + sample_num);
 }
 
+void H264EncoderImpl::encode_audio_one_frame(
+       const float *audio,
+       size_t num_samples,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       AVAudioResampleContext *resampler,
+       const vector<Mux *> &muxes)
+{
+       audio_frame->nb_samples = num_samples;
+       audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+       audio_frame->format = ctx->sample_fmt;
+       audio_frame->sample_rate = OUTPUT_FREQUENCY;
+
+       if (av_samples_alloc(audio_frame->data, nullptr, 2, num_samples, ctx->sample_fmt, 0) < 0) {
+               fprintf(stderr, "Could not allocate %ld samples.\n", num_samples);
+               exit(1);
+       }
+
+       if (avresample_convert(resampler, audio_frame->data, 0, num_samples,
+                              (uint8_t **)&audio, 0, num_samples) < 0) {
+               fprintf(stderr, "Audio conversion failed.\n");
+               exit(1);
+       }
+
+       AVPacket pkt;
+       av_init_packet(&pkt);
+       pkt.data = nullptr;
+       pkt.size = 0;
+       int got_output = 0;
+       avcodec_encode_audio2(ctx, &pkt, audio_frame, &got_output);
+       if (got_output) {
+               pkt.stream_index = 1;
+               pkt.flags = 0;
+               for (Mux *mux : muxes) {
+                       mux->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+               }
+       }
+
+       av_freep(&audio_frame->data[0]);
+
+       // TODO: Delayed frames.
+       av_frame_unref(audio_frame);
+       av_free_packet(&pkt);
+}
 
 // this is weird. but it seems to put a new frame onto the queue
 void H264EncoderImpl::storage_task_enqueue(storage_task task)
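
The queueing in `encode_audio()` above exists for codecs with a fixed `frame_size` (1024 samples per channel for AAC-style codecs): interleaved stereo floats accumulate in `*audio_queue`, whole codec frames are peeled off, and each frame's pts is offset by its sample distance from the start of the incoming block (a nonzero `sample_offset` pulls the pts of leftover queued samples backwards). A worked check of that pts arithmetic, with illustrative constants:

#include <cassert>
#include <cstdint>

int main()
{
	// Illustrative values; the real ones come from defs.h and the codec.
	const int64_t TIMEBASE = 1200000, OUTPUT_FREQUENCY = 48000;
	const int64_t frame_size = 1024;  // samples per channel, AAC-style

	int64_t sample_offset = 500;  // interleaved floats left over in the queue
	int64_t audio_pts = 0;        // pts of the incoming block's first sample

	// The second codec frame starts at interleaved index 2048, i.e.
	// (2048 - 500) / 2 = 774 sample pairs into the incoming block.
	int64_t sample_num = 2 * frame_size;
	int64_t adjusted_pts = audio_pts + (sample_num - sample_offset) * TIMEBASE / (OUTPUT_FREQUENCY * 2);
	assert(adjusted_pts == 774 * TIMEBASE / OUTPUT_FREQUENCY);  // 19350 units
	return 0;
}
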
@@ -1739,34 +1846,78 @@ int H264EncoderImpl::deinit_va()
     return 0;
 }
 
+namespace {
 
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
-       : current_storage_frame(0), surface(surface), httpd(httpd)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx, AVAudioResampleContext **resampler)
 {
-       AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
-       context_audio = avcodec_alloc_context3(codec_audio);
-       context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+       AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+       if (codec_audio == nullptr) {
+               fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+               exit(1);
+       }
+
+       AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+       context_audio->bit_rate = bit_rate;
        context_audio->sample_rate = OUTPUT_FREQUENCY;
-       context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+       context_audio->sample_fmt = codec_audio->sample_fmts[0];
        context_audio->channels = 2;
        context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
        context_audio->time_base = AVRational{1, TIMEBASE};
+       context_audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
-               fprintf(stderr, "Could not open codec\n");
+               fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
                exit(1);
        }
-       audio_frame = av_frame_alloc();
 
-       frame_width = width;
-       frame_height = height;
+       *ctx = context_audio;
+
+       *resampler = avresample_alloc_context();
+       if (*resampler == nullptr) {
+               fprintf(stderr, "Allocating resampler failed.\n");
+               exit(1);
+       }
+
+       av_opt_set_int(*resampler, "in_channel_layout",  AV_CH_LAYOUT_STEREO,       0);
+       av_opt_set_int(*resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO,       0);
+       av_opt_set_int(*resampler, "in_sample_rate",     OUTPUT_FREQUENCY,          0);
+       av_opt_set_int(*resampler, "out_sample_rate",    OUTPUT_FREQUENCY,          0);
+       av_opt_set_int(*resampler, "in_sample_fmt",      AV_SAMPLE_FMT_FLT,         0);
+       av_opt_set_int(*resampler, "out_sample_fmt",     context_audio->sample_fmt, 0);
+
+       if (avresample_open(*resampler) < 0) {
+               fprintf(stderr, "Could not open resample context.\n");
+               exit(1);
+       }
+}
+
+}  // namespace
+
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+       : current_storage_frame(0), surface(surface), httpd(httpd), frame_width(width), frame_height(height)
+{
+       init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, &context_audio_file, &resampler_audio_file);
+
+       if (!global_flags.stream_audio_codec_name.empty()) {
+               init_audio_encoder(global_flags.stream_audio_codec_name,
+                       global_flags.stream_audio_codec_bitrate, &context_audio_stream, &resampler_audio_stream);
+       }
+
        frame_width_mbaligned = (frame_width + 15) & (~15);
        frame_height_mbaligned = (frame_height + 15) & (~15);
 
+       open_output_stream();
+
+       audio_frame = av_frame_alloc();
+
        //print_input();
 
-       if (global_flags.uncompressed_video_to_http) {
+       if (global_flags.uncompressed_video_to_http ||
+           global_flags.x264_video_to_http) {
                reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
        }
+       if (global_flags.x264_video_to_http) {
+               x264_encoder.reset(new X264Encoder(stream_mux.get()));
+       }
 
        init_va(va_display);
        setup_encode();
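
Two details in the constructor hunk above: `init_audio_encoder()` now runs once for the file mux and, when `global_flags.stream_audio_codec_name` is set, a second time with the stream's codec and bitrate; and the frame dimensions are rounded up to H.264's 16x16 macroblock grid with the usual `(x + 15) & ~15` idiom. A quick check of that alignment arithmetic:

// (x + 15) & ~15 rounds x up to the next multiple of 16 (the macroblock size).
static_assert(((1280 + 15) & ~15) == 1280, "already aligned");
static_assert(((1366 + 15) & ~15) == 1376, "rounded up to the next multiple of 16");
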
@@ -1797,8 +1948,11 @@ H264EncoderImpl::~H264EncoderImpl()
 {
        shutdown();
        av_frame_free(&audio_frame);
-
-       // TODO: Destroy context.
+       avresample_free(&resampler_audio_file);
+       avresample_free(&resampler_audio_stream);
+       avcodec_free_context(&context_audio_file);
+       avcodec_free_context(&context_audio_stream);
+       close_output_stream();
 }
 
 bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
@@ -1953,6 +2107,94 @@ void H264EncoderImpl::shutdown()
        is_shutdown = true;
 }
 
+void H264EncoderImpl::open_output_file(const std::string &filename)
+{
+       AVFormatContext *avctx = avformat_alloc_context();
+       avctx->oformat = av_guess_format(NULL, filename.c_str(), NULL);
+       assert(filename.size() < sizeof(avctx->filename) - 1);
+       strcpy(avctx->filename, filename.c_str());
+
+       string url = "file:" + filename;
+       int ret = avio_open2(&avctx->pb, url.c_str(), AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL);
+       if (ret < 0) {
+               char tmp[AV_ERROR_MAX_STRING_SIZE];
+               fprintf(stderr, "%s: avio_open2() failed: %s\n", filename.c_str(), av_make_error_string(tmp, sizeof(tmp), ret));
+               exit(1);
+       }
+
+       file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, context_audio_file->codec, TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE, nullptr));
+}
+
+void H264EncoderImpl::close_output_file()
+{
+        file_mux.reset();
+}
+
+void H264EncoderImpl::open_output_stream()
+{
+       AVFormatContext *avctx = avformat_alloc_context();
+       AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
+       assert(oformat != nullptr);
+       avctx->oformat = oformat;
+
+       string codec_name;
+       int bit_rate;
+
+       if (global_flags.stream_audio_codec_name.empty()) {
+               codec_name = AUDIO_OUTPUT_CODEC_NAME;
+               bit_rate = DEFAULT_AUDIO_OUTPUT_BIT_RATE;
+       } else {
+               codec_name = global_flags.stream_audio_codec_name;
+               bit_rate = global_flags.stream_audio_codec_bitrate;
+       }
+
+       uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+       avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, &H264EncoderImpl::write_packet_thunk, nullptr);
+
+       Mux::Codec video_codec;
+       if (global_flags.uncompressed_video_to_http) {
+               video_codec = Mux::CODEC_NV12;
+       } else {
+               video_codec = Mux::CODEC_H264;
+       }
+
+       avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+       AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+       if (codec_audio == nullptr) {
+               fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+               exit(1);
+       }
+
+       int time_base = global_flags.stream_coarse_timebase ? COARSE_TIMEBASE : TIMEBASE;
+       stream_mux_writing_header = true;
+       stream_mux.reset(new Mux(avctx, frame_width, frame_height, video_codec, codec_audio, time_base, bit_rate, this));
+       stream_mux_writing_header = false;
+       httpd->set_header(stream_mux_header);
+       stream_mux_header.clear();
+}
+
+void H264EncoderImpl::close_output_stream()
+{
+       stream_mux.reset();
+}
+
+int H264EncoderImpl::write_packet_thunk(void *opaque, uint8_t *buf, int buf_size)
+{
+       H264EncoderImpl *h264_encoder = (H264EncoderImpl *)opaque;
+       return h264_encoder->write_packet(buf, buf_size);
+}
+
+int H264EncoderImpl::write_packet(uint8_t *buf, int buf_size)
+{
+       if (stream_mux_writing_header) {
+               stream_mux_header.append((char *)buf, buf_size);
+       } else {
+               httpd->add_data((char *)buf, buf_size, stream_mux_writing_keyframes);
+               stream_mux_writing_keyframes = false;
+       }
+       return buf_size;
+}
+
 void H264EncoderImpl::encode_thread_func()
 {
        int64_t last_dts = -1;
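
`open_output_stream()` and `write_packet_thunk()` above use FFmpeg's standard custom-I/O idiom: allocate an `AVIOContext` over your own buffer with a write callback, and smuggle `this` through the opaque pointer so the muxed bytes land in member code instead of a file. A minimal self-contained sketch of the same pattern, writing into a `std::string` (the names here are hypothetical, not part of the patch):

extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdint>
#include <string>

static int write_to_string(void *opaque, uint8_t *buf, int buf_size)
{
	static_cast<std::string *>(opaque)->append(reinterpret_cast<char *>(buf), buf_size);
	return buf_size;
}

AVIOContext *make_string_avio(std::string *out)
{
	const int buf_size = 4096;  // Arbitrary buffer size for this sketch.
	uint8_t *buf = (uint8_t *)av_malloc(buf_size);
	// write_flag = 1; <out> comes back as <opaque> on every callback.
	return avio_alloc_context(buf, buf_size, 1, out, nullptr, &write_to_string, nullptr);
}
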
@@ -2018,11 +2260,17 @@ void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int g
                last_dts = dts;
        }
 
-       if (global_flags.uncompressed_video_to_http) {
+       if (global_flags.uncompressed_video_to_http ||
+           global_flags.x264_video_to_http) {
                // Add frames left in reorderer.
                while (!reorderer->empty()) {
                        pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
-                       add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                       if (global_flags.uncompressed_video_to_http) {
+                               add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                       } else {
+                               assert(global_flags.x264_video_to_http);
+                               x264_encoder->add_frame(output_frame.first, output_frame.second);
+                       }
                }
        }
 }
@@ -2036,7 +2284,7 @@ void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8
        pkt.size = frame_width * frame_height * 2;
        pkt.stream_index = 0;
        pkt.flags = AV_PKT_FLAG_KEY;
-       httpd->add_packet(pkt, pts, pts, HTTPD::DESTINATION_HTTP_ONLY);
+       stream_mux->add_packet(pkt, pts, pts);
 }
 
 namespace {
@@ -2091,12 +2339,18 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int enco
                va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
 
-               if (global_flags.uncompressed_video_to_http) {
+               if (global_flags.uncompressed_video_to_http ||
+                   global_flags.x264_video_to_http) {
                        // Add uncompressed video. (Note that pts == dts here.)
-                       const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // Needs to match audio.
-                       pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay, reinterpret_cast<uint8_t *>(surf->y_ptr));
+                       // Delay needs to match audio.
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<uint8_t *>(surf->y_ptr));
                        if (output_frame.second != nullptr) {
-                               add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                               if (global_flags.uncompressed_video_to_http) {
+                                       add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                               } else {
+                                       assert(global_flags.x264_video_to_http);
+                                       x264_encoder->add_frame(output_frame.first, output_frame.second);
+                               }
                        }
                }
        }
@@ -2164,4 +2418,12 @@ void H264Encoder::shutdown()
        impl->shutdown();
 }
 
-// Real class.
+void H264Encoder::open_output_file(const std::string &filename)
+{
+       impl->open_output_file(filename);
+}
+
+void H264Encoder::close_output_file()
+{
+       impl->close_output_file();
+}