Include the raw audio in the MJPEG output.
diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index 46bb94c7639112f76b031c96c63acab9560b99ce..033f67afd9e439db3a5289248bed5b67e8f306ba 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -120,7 +120,7 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
        // a situation with only one video stream (and possibly one audio stream)
        // with known width/height, and we don't need the extra functionality it provides.
        avctx.reset(avformat_alloc_context());
-       avctx->oformat = av_guess_format("mp4", nullptr, nullptr);
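+       // NUT instead of MP4: unlike MP4, NUT can mux the raw pcm_s32le audio
+       // streams we add below.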
+       avctx->oformat = av_guess_format("nut", nullptr, nullptr);
 
        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
@@ -133,7 +133,11 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
                        fprintf(stderr, "avformat_new_stream() failed\n");
                        abort();
                }
-               stream->time_base = AVRational{ 1, TIMEBASE };
+
+               // FFmpeg insists on a 1/48000 timebase for the audio no matter
+               // what we write, so even though we'd prefer our usual 1/120000,
+               // we put the video on the same timebase to keep audio and video locked.
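+               // (For example, a video frame at pts 240000, i.e. two seconds at
+               // 1/120000, lands on pts 96000 in the 1/48000 timebase.)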
+               stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
                stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
                stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
 
@@ -151,6 +155,19 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
                stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
                stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
        }
+       for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+               AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
+               if (stream == nullptr) {
+                       fprintf(stderr, "avformat_new_stream() failed\n");
+                       abort();
+               }
+               stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+               stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
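+               // Raw 32-bit PCM, so the int32_t samples we are handed can be
+               // written out as-is (assuming a little-endian host).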
+               stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+               stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+               stream->codecpar->channels = 2;
+               stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
+       }
 
        AVDictionary *options = NULL;
        vector<pair<string, string>> opts = MUX_OPTS;
@@ -269,7 +286,7 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
        return va_dpy;
 }
 
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio)
 {
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        if (video_format.width == 0 || video_format.height == 0) {
@@ -300,7 +317,7 @@ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFram
                return;
        }
        ++metric_mjpeg_overrun_submitted;
-       frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+       frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio) });
        any_frames_to_be_encoded.notify_all();
 }
 
@@ -341,6 +358,11 @@ void MJPEGEncoder::encoder_thread_func()
                        // Will call back in the receiver thread.
                        encode_jpeg_va(move(qf));
                } else {
+                       // Write audio before video, since Futatabi expects it.
+                       if (qf.audio.size() > 0) {
+                               write_audio_packet(qf.pts, qf.card_index, qf.audio);
+                       }
+
                        // Encode synchronously, in the same thread.
                        vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
                        write_mjpeg_packet(qf.pts, qf.card_index, jpeg.data(), jpeg.size());
@@ -364,6 +386,27 @@ void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const ui
        pkt.flags = AV_PKT_FLAG_KEY;
        AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
        pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
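+       // Zero means unknown duration; unlike for the audio packets, we don't
+       // have a per-frame duration to write here.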
+       pkt.duration = 0;
+
+       if (av_write_frame(avctx.get(), &pkt) < 0) {
+               fprintf(stderr, "av_write_frame() failed\n");
+               abort();
+       }
+}
+
+void MJPEGEncoder::write_audio_packet(int64_t pts, unsigned card_index, const vector<int32_t> &audio)
+{
+       AVPacket pkt;
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.buf = nullptr;
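+       // Point the packet directly into the sample buffer. With pkt.buf == nullptr
+       // the packet does not own the data, which is fine: av_write_frame() below
+       // is done with the buffer before we return.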
+       pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
+       pkt.size = audio.size() * sizeof(audio[0]);
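+       // The audio streams were created after all the video streams (see the
+       // constructor), so skip one video stream per exported card to find ours.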
+       pkt.stream_index = card_index + global_flags.card_to_mjpeg_stream_export.size();
+       pkt.flags = AV_PKT_FLAG_KEY;
+       AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
+       pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
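+       // The buffer holds interleaved stereo samples, so the number of sample
+       // frames (and thus the duration) is half the element count.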
+       size_t num_stereo_samples = audio.size() / 2;
+       pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);
 
        if (av_write_frame(avctx.get(), &pkt) < 0) {
                fprintf(stderr, "av_write_frame() failed\n");
@@ -752,6 +795,11 @@ void MJPEGEncoder::va_receiver_thread_func()
                        frames_encoding.pop();
                }
 
+               // Write audio before video, since Futatabi expects it.
+               if (qf.audio.size() > 0) {
+                       write_audio_packet(qf.pts, qf.card_index, qf.audio);
+               }
+
                VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
                CHECK_VASTATUS(va_status, "vaSyncSurface");