X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=nageru%2Fmjpeg_encoder.cpp;h=033f67afd9e439db3a5289248bed5b67e8f306ba;hb=67cb1b844a13c84a2df64a603644579310530409;hp=24aa0ebabbdb3d04dbed9ce8542d059c62d96881;hpb=23da824e1d61e37fbe0cc1c0f4d32052022a50ba;p=nageru diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp index 24aa0eb..033f67a 100644 --- a/nageru/mjpeg_encoder.cpp +++ b/nageru/mjpeg_encoder.cpp @@ -120,7 +120,7 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display) // a situation with only one video stream (and possibly one audio stream) // with known width/height, and we don't need the extra functionality it provides. avctx.reset(avformat_alloc_context()); - avctx->oformat = av_guess_format("mp4", nullptr, nullptr); + avctx->oformat = av_guess_format("nut", nullptr, nullptr); uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE); avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr); @@ -131,9 +131,13 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display) AVStream *stream = avformat_new_stream(avctx.get(), nullptr); if (stream == nullptr) { fprintf(stderr, "avformat_new_stream() failed\n"); - exit(1); + abort(); } - stream->time_base = AVRational{ 1, TIMEBASE }; + + // FFmpeg is very picky about having audio at 1/48000 timebase, + // no matter what we write. Even though we'd prefer our usual 1/120000, + // put the video on the same one, so that we can have locked audio. 
+	stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
 	stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 	stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
 
@@ -151,6 +155,19 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 		stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
 		stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
 	}
+	for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+		AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
+		if (stream == nullptr) {
+			fprintf(stderr, "avformat_new_stream() failed\n");
+			abort();
+		}
+		stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+		stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+		stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+		stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+		stream->codecpar->channels = 2;
+		stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
+	}
 
 	AVDictionary *options = NULL;
 	vector<pair<string, string>> opts = MUX_OPTS;
@@ -159,7 +176,7 @@
 	}
 	if (avformat_write_header(avctx.get(), &options) < 0) {
 		fprintf(stderr, "avformat_write_header() failed\n");
-		exit(1);
+		abort();
 	}
 
 	// Initialize VA-API.
@@ -269,7 +286,7 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
 	return va_dpy;
 }
 
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio)
 {
 	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
 	if (video_format.width == 0 || video_format.height == 0) {
@@ -300,7 +317,7 @@ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFram
 		return;
 	}
 	++metric_mjpeg_overrun_submitted;
-	frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+	frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio) });
 	any_frames_to_be_encoded.notify_all();
 }
 
@@ -341,6 +358,11 @@ void MJPEGEncoder::encoder_thread_func()
 			// Will call back in the receiver thread.
 			encode_jpeg_va(move(qf));
 		} else {
+			// Write audio before video, since Futatabi expects it.
+			if (qf.audio.size() > 0) {
+				write_audio_packet(qf.pts, qf.card_index, qf.audio);
+			}
+
 			// Encode synchronously, in the same thread.
 			vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
 			write_mjpeg_packet(qf.pts, qf.card_index, jpeg.data(), jpeg.size());
 
@@ -364,10 +386,31 @@ void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const ui
 	pkt.flags = AV_PKT_FLAG_KEY;
 	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
 	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+	pkt.duration = 0;
+
+	if (av_write_frame(avctx.get(), &pkt) < 0) {
+		fprintf(stderr, "av_write_frame() failed\n");
+		abort();
+	}
+}
+
+void MJPEGEncoder::write_audio_packet(int64_t pts, unsigned card_index, const vector<int32_t> &audio)
+{
+	AVPacket pkt;
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.buf = nullptr;
+	pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
+	pkt.size = audio.size() * sizeof(audio[0]);
+	pkt.stream_index = card_index + global_flags.card_to_mjpeg_stream_export.size();
+	pkt.flags = AV_PKT_FLAG_KEY;
+	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
+	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+	size_t num_stereo_samples = audio.size() / 2;
+	pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);
 
 	if (av_write_frame(avctx.get(), &pkt) < 0) {
 		fprintf(stderr, "av_write_frame() failed\n");
-		exit(1);
+		abort();
 	}
 }
 
@@ -752,6 +795,11 @@ void MJPEGEncoder::va_receiver_thread_func()
 			frames_encoding.pop();
 		}
 
+		// Write audio before video, since Futatabi expects it.
+		if (qf.audio.size() > 0) {
+			write_audio_packet(qf.pts, qf.card_index, qf.audio);
+		}
+
 		VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
 		CHECK_VASTATUS(va_status, "vaSyncSurface");