Make it possible for file and HTTP streams to use different audio codecs.
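
Both the local file mux and the HTTP stream now receive packets through a
common PacketDestination pointer, and encode_audio() takes a list of such
destinations instead of an HTTPD-specific enum, so encoded audio can be
fanned out to the file and the stream separately (with different codecs) or
together. The interface itself is not part of this diff; a minimal sketch of
what the calls below appear to assume (the exact declaration, e.g. whether
the packet is passed by const reference, is a guess):

    // Hedged sketch: Mux and HTTPD are assumed to both implement this
    // three-argument add_packet(), matching the calls in the diff.
    class PacketDestination {
    public:
            virtual ~PacketDestination() {}
            virtual void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts) = 0;
    };
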
diff --git a/h264encode.cpp b/h264encode.cpp
index 7bb0f8469a4b8c4daaa5a6551255afc55657e7c5..8f4508835c0a2f3d57a02c5e5c52f258f73344c2 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -198,6 +198,8 @@ public:
        bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
        RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
        void shutdown();
+       void open_output_file(const std::string &filename);
+       void close_output_file();
 
 private:
        struct storage_task {
@@ -224,9 +226,15 @@ private:
                          int frame_type, int64_t pts, int64_t dts);
        void storage_task_thread();
        void encode_audio(const vector<float> &audio,
+                         vector<float> *audio_queue,
                          int64_t audio_pts,
                          AVCodecContext *ctx,
-                         HTTPD::PacketDestination destination);
+                         const vector<PacketDestination *> &destinations);
+       void encode_audio_one_frame(const float *audio,
+                                   size_t num_samples,  // In each channel.
+                                   int64_t audio_pts,
+                                   AVCodecContext *ctx,
+                                   const vector<PacketDestination *> &destinations);
        void storage_task_enqueue(storage_task task);
        void save_codeddata(storage_task task);
        int render_packedsequence();
@@ -273,7 +281,12 @@ private:
        map<int64_t, vector<float>> pending_audio_frames;  // under frame_queue_mutex
        QSurface *surface;
 
-       AVCodecContext *context_audio;
+       AVCodecContext *context_audio_file;
+       AVCodecContext *context_audio_stream = nullptr;  // nullptr = don't code separate audio for stream.
+
+       vector<float> audio_queue_file;
+       vector<float> audio_queue_stream;
+
        AVFrame *audio_frame = nullptr;
        HTTPD *httpd;
        unique_ptr<FrameReorderer> reorderer;
@@ -335,6 +348,8 @@ private:
        int frame_height;
        int frame_width_mbaligned;
        int frame_height_mbaligned;
+
+       unique_ptr<Mux> file_mux;  // To local disk.
 };
 
 // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
@@ -1621,8 +1636,12 @@ void H264EncoderImpl::save_codeddata(storage_task task)
                        pkt.flags = 0;
                }
                //pkt.duration = 1;
-               httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay(),
-                               global_flags.uncompressed_video_to_http ? HTTPD::DESTINATION_FILE_ONLY : HTTPD::DESTINATION_FILE_AND_HTTP);
+               if (file_mux) {
+                       file_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
+               if (!global_flags.uncompressed_video_to_http) {
+                       httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
+               }
        }
        // Encode and add all audio frames up to and including the pts of this video frame.
        for ( ;; ) {
@@ -1639,7 +1658,12 @@ void H264EncoderImpl::save_codeddata(storage_task task)
                        pending_audio_frames.erase(it); 
                }
 
-               encode_audio(audio, audio_pts, context_audio, HTTPD::DESTINATION_FILE_AND_HTTP);
+               if (context_audio_stream) {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { file_mux.get() });
+                       encode_audio(audio, &audio_queue_stream, audio_pts, context_audio_stream, { httpd });
+               } else {
+                       encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { httpd, file_mux.get() });
+               }
 
                if (audio_pts == task.pts) break;
        }
@@ -1647,11 +1671,41 @@ void H264EncoderImpl::save_codeddata(storage_task task)
 
 void H264EncoderImpl::encode_audio(
        const vector<float> &audio,
+       vector<float> *audio_queue,
        int64_t audio_pts,
        AVCodecContext *ctx,
-       HTTPD::PacketDestination destination)
+       const vector<PacketDestination *> &destinations)
 {
-       audio_frame->nb_samples = audio.size() / 2;
+       if (ctx->frame_size == 0) {
+               // No queueing needed.
+               assert(audio_queue->empty());
+               assert(audio.size() % 2 == 0);
+               encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx, destinations);
+               return;
+       }
+
+       audio_queue->insert(audio_queue->end(), audio.begin(), audio.end());
+       size_t sample_num;
+       for (sample_num = 0;
+            sample_num + ctx->frame_size * 2 <= audio_queue->size();
+            sample_num += ctx->frame_size * 2) {
+               encode_audio_one_frame(&(*audio_queue)[sample_num],
+                                      ctx->frame_size,
+                                      audio_pts,
+                                      ctx,
+                                      destinations);
+       }
+       audio_queue->erase(audio_queue->begin(), audio_queue->begin() + sample_num);
+}
+
+void H264EncoderImpl::encode_audio_one_frame(
+       const float *audio,
+       size_t num_samples,
+       int64_t audio_pts,
+       AVCodecContext *ctx,
+       const vector<PacketDestination *> &destinations)
+{
+       audio_frame->nb_samples = num_samples;
        audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
        unique_ptr<float[]> planar_samples;
@@ -1659,21 +1713,21 @@ void H264EncoderImpl::encode_audio(
 
        if (ctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
                audio_frame->format = AV_SAMPLE_FMT_FLTP;
-               planar_samples.reset(new float[audio.size()]);
-               avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-               for (int i = 0; i < audio_frame->nb_samples; ++i) {
+               planar_samples.reset(new float[num_samples * 2]);
+               avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), num_samples * 2 * sizeof(float), 0);
+               for (size_t i = 0; i < num_samples; ++i) {
                        planar_samples[i] = audio[i * 2 + 0];
-                       planar_samples[i + audio_frame->nb_samples] = audio[i * 2 + 1];
+                       planar_samples[i + num_samples] = audio[i * 2 + 1];
                }
        } else {
                assert(ctx->sample_fmt == AV_SAMPLE_FMT_S32);
-               int_samples.reset(new int32_t[audio.size()]);
-               int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+               int_samples.reset(new int32_t[num_samples * 2]);
+               int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), num_samples * 2 * sizeof(int32_t), 1);
                if (ret < 0) {
                        fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
                        exit(1);
                }
-               for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
+               for (size_t i = 0; i < num_samples * 2; ++i) {
                        if (audio[i] >= 1.0f) {
                                int_samples[i] = 2147483647;
                        } else if (audio[i] <= -1.0f) {
@@ -1689,11 +1743,13 @@ void H264EncoderImpl::encode_audio(
        pkt.data = nullptr;
        pkt.size = 0;
        int got_output = 0;
-       avcodec_encode_audio2(context_audio, &pkt, audio_frame, &got_output);
+       avcodec_encode_audio2(ctx, &pkt, audio_frame, &got_output);
        if (got_output) {
                pkt.stream_index = 1;
                pkt.flags = AV_PKT_FLAG_KEY;
-               httpd->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay(), destination);
+               for (PacketDestination *dest : destinations) {
+                       dest->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+               }
        }
        // TODO: Delayed frames.
        av_frame_unref(audio_frame);
@@ -1812,7 +1868,12 @@ void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext *
 H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
        : current_storage_frame(0), surface(surface), httpd(httpd)
 {
-       init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, AUDIO_OUTPUT_BIT_RATE, &context_audio);
+       init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, &context_audio_file);
+
+       if (!global_flags.stream_audio_codec_name.empty()) {
+               init_audio_encoder(global_flags.stream_audio_codec_name,
+                       global_flags.stream_audio_codec_bitrate, &context_audio_stream);
+       }
 
        audio_frame = av_frame_alloc();
 
@@ -2012,6 +2073,29 @@ void H264EncoderImpl::shutdown()
        is_shutdown = true;
 }
 
+void H264EncoderImpl::open_output_file(const std::string &filename)
+{
+       AVFormatContext *avctx = avformat_alloc_context();
+       avctx->oformat = av_guess_format(NULL, filename.c_str(), NULL);
+       assert(filename.size() < sizeof(avctx->filename) - 1);
+       strcpy(avctx->filename, filename.c_str());
+
+       string url = "file:" + filename;
+       int ret = avio_open2(&avctx->pb, url.c_str(), AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL);
+       if (ret < 0) {
+               char tmp[AV_ERROR_MAX_STRING_SIZE];
+               fprintf(stderr, "%s: avio_open2() failed: %s\n", filename.c_str(), av_make_error_string(tmp, sizeof(tmp), ret));
+               exit(1);
+       }
+
+       file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
+}
+
+void H264EncoderImpl::close_output_file()
+{
+        file_mux.reset();
+}
+
 void H264EncoderImpl::encode_thread_func()
 {
        int64_t last_dts = -1;
@@ -2095,7 +2179,7 @@ void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8
        pkt.size = frame_width * frame_height * 2;
        pkt.stream_index = 0;
        pkt.flags = AV_PKT_FLAG_KEY;
-       httpd->add_packet(pkt, pts, pts, HTTPD::DESTINATION_HTTP_ONLY);
+       httpd->add_packet(pkt, pts, pts);
 }
 
 namespace {
@@ -2223,4 +2307,12 @@ void H264Encoder::shutdown()
        impl->shutdown();
 }
 
-// Real class.
+void H264Encoder::open_output_file(const std::string &filename)
+{
+       impl->open_output_file(filename);
+}
+
+void H264Encoder::close_output_file()
+{
+       impl->close_output_file();
+}
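
With these wrappers in place, a caller (hypothetically the mixer, when
recording to disk starts and stops) would drive the file output roughly as
below; the encoder pointer and filename are placeholders, and the container
is whatever av_guess_format() picks from the filename:

    // Hedged usage sketch; h264_encoder and the filename are hypothetical.
    h264_encoder->open_output_file("recording.nut");  // creates file_mux for local disk
    // ... frames are encoded; save_codeddata() feeds file_mux while it exists ...
    h264_encoder->close_output_file();                // resets file_mux, finalizing the file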