X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=nageru%2Fkaeru.cpp;h=ce58aef9473f3da33112ca5d72e5d8ca5da24ded;hb=bdc9f1ea04141e71906d486f9d254c3346835e72;hp=b10324c8b4091516072294a521ce66b76760041e;hpb=0aab96d4c270758b72cb09606ce183b41588c746;p=nageru

diff --git a/nageru/kaeru.cpp b/nageru/kaeru.cpp
index b10324c..ce58aef 100644
--- a/nageru/kaeru.cpp
+++ b/nageru/kaeru.cpp
@@ -18,6 +18,10 @@
 #include
 #include
 
+extern "C" {
+#include
+}
+
 using namespace bmusb;
 using namespace movit;
 using namespace std;
@@ -47,18 +51,19 @@ int write_packet(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType ty
 		type = AVIO_DATA_MARKER_SYNC_POINT;
 	}
 
+	HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
 	if (type == AVIO_DATA_MARKER_HEADER) {
 		stream_mux_header.append((char *)buf, buf_size);
-		httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
+		httpd->set_header(stream_id, stream_mux_header);
 	} else {
-		httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+		httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
 	}
 	return buf_size;
 }
 
 }  // namespace
 
-unique_ptr<Mux> create_mux(HTTPD *httpd, AVOutputFormat *oformat, X264Encoder *x264_encoder, AudioEncoder *audio_encoder)
+unique_ptr<Mux> create_mux(HTTPD *httpd, const AVOutputFormat *oformat, X264Encoder *x264_encoder, AudioEncoder *audio_encoder)
 {
 	AVFormatContext *avctx = avformat_alloc_context();
 	avctx->oformat = oformat;
@@ -74,7 +79,7 @@ unique_ptr<Mux> create_mux(HTTPD *httpd, AVOutputFormat *oformat, X264Encoder *x
 	// If audio is disabled (ie., we won't ever see any audio packets),
 	// set nullptr here to also not include the stream in the mux.
 	AVCodecParameters *audio_codecpar =
-		global_flags.enable_audio ? audio_encoder->get_codec_parameters().get() : nullptr;
+		global_flags.enable_audio ? audio_encoder->get_codec_parameters().release() : nullptr;
 
 	unique_ptr<Mux> mux;
 	mux.reset(new Mux(avctx, global_flags.width, global_flags.height, Mux::CODEC_H264, video_extradata, audio_codecpar,
@@ -137,9 +142,39 @@ void video_frame_callback(FFmpegCapture *video, X264Encoder *x264_encoder, Audio
 	}
 }
 
-void audio_frame_callback(Mux *mux, const AVPacket *pkt, AVRational timebase)
+void raw_packet_callback(Mux *mux, int stream_index, const AVPacket *pkt, AVRational timebase)
+{
+	mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
+}
+
+void filter_packet_callback(Mux *mux, int stream_index, AVBSFContext *bsfctx, const AVPacket *pkt, AVRational timebase)
 {
-	mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, /*stream_index=*/1);
+	if (pkt->size <= 2 || pkt->data[0] != 0xff || (pkt->data[1] & 0xf0) != 0xf0) {
+		// Not ADTS data, so just pass it through.
+		mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
+		return;
+	}
+
+	AVPacket *in_pkt = av_packet_clone(pkt);
+	unique_ptr<AVPacket, decltype(av_packet_unref)*> in_pkt_cleanup(in_pkt, av_packet_unref);
+	int err = av_bsf_send_packet(bsfctx, in_pkt);
+	if (err < 0) {
+		fprintf(stderr, "av_bsf_send_packet() failed with %d, ignoring\n", err);
+	}
+	for ( ;; ) {
+		AVPacket out_pkt;
+		unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(&out_pkt, av_packet_unref);
+		av_init_packet(&out_pkt);
+		err = av_bsf_receive_packet(bsfctx, &out_pkt);
+		if (err == AVERROR(EAGAIN)) {
+			break;
+		}
+		if (err < 0) {
+			fprintf(stderr, "av_bsf_receive_packet() failed with %d, ignoring\n", err);
+			return;
+		}
+		mux->add_packet(out_pkt, out_pkt.pts, out_pkt.dts == AV_NOPTS_VALUE ? out_pkt.pts : out_pkt.dts, timebase, stream_index);
+	}
 }
 
 void adjust_bitrate(int signal)
@@ -180,7 +215,7 @@ int main(int argc, char *argv[])
 		usage(PROGRAM_KAERU);
 		abort();
 	}
-	global_flags.num_cards = 1;  // For latency metrics.
+	global_flags.max_num_cards = 1;  // For latency metrics.
 
 #if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
 	av_register_all();
@@ -189,7 +224,7 @@
 
 	HTTPD httpd;
 
-	AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
+	const AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
 	assert(oformat != nullptr);
 
 	unique_ptr<AudioEncoder> audio_encoder;
@@ -199,19 +234,39 @@
 		audio_encoder.reset(new AudioEncoder(global_flags.stream_audio_codec_name, global_flags.stream_audio_codec_bitrate, oformat));
 	}
 
-	unique_ptr<X264Encoder> x264_encoder(new X264Encoder(oformat));
+	unique_ptr<X264Encoder> x264_encoder(new X264Encoder(oformat, /*use_separate_disk_params=*/false));
 	unique_ptr<Mux> http_mux = create_mux(&httpd, oformat, x264_encoder.get(), audio_encoder.get());
 	if (global_flags.transcode_audio) {
 		audio_encoder->add_mux(http_mux.get());
 	}
-	x264_encoder->add_mux(http_mux.get());
+	if (global_flags.transcode_video) {
+		x264_encoder->add_mux(http_mux.get());
+	}
 	global_x264_encoder = x264_encoder.get();
 
 	FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
 	video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
-	video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+	if (global_flags.transcode_video) {
+		video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+	} else {
+		video.set_video_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/0, _1, _2));
+	}
 	if (!global_flags.transcode_audio && global_flags.enable_audio) {
-		video.set_audio_callback(bind(audio_frame_callback, http_mux.get(), _1, _2));
+		AVBSFContext *bsfctx = nullptr;
+		if (strcmp(oformat->name, "mp4") == 0 && strcmp(audio_encoder->get_codec()->name, "aac") == 0) {
+			// We need to insert the aac_adtstoasc filter, seemingly (or we will get warnings to do so).
+			const AVBitStreamFilter *filter = av_bsf_get_by_name("aac_adtstoasc");
+			int err = av_bsf_alloc(filter, &bsfctx);
+			if (err < 0) {
+				fprintf(stderr, "av_bsf_alloc() failed with %d\n", err);
+				exit(1);
+			}
+		}
+		if (bsfctx == nullptr) {
+			video.set_audio_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/1, _1, _2));
+		} else {
+			video.set_audio_callback(bind(filter_packet_callback, http_mux.get(), /*stream_index=*/1, bsfctx, _1, _2));
+		}
 	}
 	video.configure_card();
 	video.start_bm_capture();
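
Note on the new filter_packet_callback() above: it is the standard send/receive loop of FFmpeg's bitstream-filter (av_bsf_*) API, used here to repackage ADTS AAC into the form the mp4 mux wants via aac_adtstoasc. The sketch below shows the same pattern in isolation; it is not taken from kaeru.cpp. The <libavcodec/bsf.h> header, the open_adtstoasc()/filter_one_packet() helpers and the generic consume() callback are assumptions, and unlike the diff it also fills in par_in and calls av_bsf_init() before the first av_bsf_send_packet(), which is what the documented API expects.

extern "C" {
#include <libavcodec/bsf.h>  // av_bsf_* API; FFmpeg >= 4.3 (older releases declare it in avcodec.h instead).
}

#include <stdio.h>

// Allocate and initialize an aac_adtstoasc filter for one input stream.
// 'audio_par' and 'time_base' describe the incoming ADTS AAC packets.
// Returns nullptr on failure.
static AVBSFContext *open_adtstoasc(const AVCodecParameters *audio_par, AVRational time_base)
{
	const AVBitStreamFilter *filter = av_bsf_get_by_name("aac_adtstoasc");
	if (filter == nullptr) {
		return nullptr;
	}
	AVBSFContext *bsfctx = nullptr;
	if (av_bsf_alloc(filter, &bsfctx) < 0) {
		return nullptr;
	}
	// Tell the filter what it will be fed, then initialize it.
	avcodec_parameters_copy(bsfctx->par_in, audio_par);
	bsfctx->time_base_in = time_base;
	if (av_bsf_init(bsfctx) < 0) {
		av_bsf_free(&bsfctx);
		return nullptr;
	}
	return bsfctx;
}

// Push one packet through the filter and hand every resulting packet to
// 'consume' (in kaeru, that role is played by Mux::add_packet()).
// Note that av_bsf_send_packet() takes over the reference held by 'pkt'.
template <class Consumer>
static void filter_one_packet(AVBSFContext *bsfctx, AVPacket *pkt, Consumer &&consume)
{
	int err = av_bsf_send_packet(bsfctx, pkt);
	if (err < 0) {
		fprintf(stderr, "av_bsf_send_packet() failed with %d\n", err);
		return;
	}
	AVPacket *out = av_packet_alloc();
	// One input packet can yield zero, one or several output packets;
	// AVERROR(EAGAIN) simply means the filter wants more input.
	while (av_bsf_receive_packet(bsfctx, out) == 0) {
		consume(out);
		av_packet_unref(out);
	}
	av_packet_free(&out);
}

The 0xff / 0xf0 test at the top of filter_packet_callback() checks for the 12-bit ADTS syncword (0xFFF); packets that do not start with it are assumed to already be raw AAC and bypass the filter entirely.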