Add a --no-transcode-video flag to Kaeru.
author     Steinar H. Gunderson <steinar+nageru@gunderson.no>
           Sun, 4 Apr 2021 09:55:23 +0000 (11:55 +0200)
committer  Steinar H. Gunderson <steinar+nageru@gunderson.no>
           Sun, 4 Apr 2021 09:55:23 +0000 (11:55 +0200)
This is useful primarily if you want Kaeru to rewrap the stream into
Metacube (for cubemap) and do nothing else with it. Only H.264
is supported for now, since the rest of the code assumes that codec.

Currently, we only really support --http-mux=mpegts; other muxes seem
to have issues.
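
For example, rewrapping a single H.264 MPEG-TS source could look roughly
like this (the input URL is a placeholder; adjust the audio flags to
match your source):

  kaeru --no-transcode-video --http-mux=mpegts http://example.com/source.ts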

nageru/ffmpeg_capture.cpp
nageru/ffmpeg_capture.h
nageru/flags.cpp
nageru/flags.h
nageru/kaeru.cpp

diff --git a/nageru/ffmpeg_capture.cpp b/nageru/ffmpeg_capture.cpp
index a8b6f612a06a470eb7427648497dffc4aaf47295..0a328e0182570a8953407541954a2ee5ac7763aa 100644
@@ -438,9 +438,11 @@ void FFmpegCapture::send_disconnected_frame()
                        memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
                }
 
-               frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
-                       video_frame, /*video_offset=*/0, video_format,
-                       FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+               if (frame_callback != nullptr) {
+                       frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
+                               video_frame, /*video_offset=*/0, video_format,
+                               FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+               }
                last_frame_was_connected = false;
        }
 
@@ -734,9 +736,11 @@ bool FFmpegCapture::play_video(const string &pathname)
                                        timecode += MAX_FPS * 2 + 1;
                                }
                                last_neutral_color = get_neutral_color(frame->metadata);
-                               frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
-                                       video_frame.get_and_release(), 0, video_format,
-                                       audio_frame.get_and_release(), 0, audio_format);
+                               if (frame_callback != nullptr) {
+                                       frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
+                                               video_frame.get_and_release(), 0, video_format,
+                                               audio_frame.get_and_release(), 0, audio_format);
+                               }
                                first_frame = false;
                                last_frame = steady_clock::now();
                                last_frame_was_connected = true;
@@ -841,6 +845,9 @@ AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCo
                        if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
                                audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
                        }
+                       if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
+                               video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
+                       }
                        if (pkt.stream_index == video_stream_index) {
                                if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
                                        fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
diff --git a/nageru/ffmpeg_capture.h b/nageru/ffmpeg_capture.h
index 1fc8f7128f516d6b4d06431b3049256f7bbdf7f7..2ab9481aad6d351e5597f86cc0df3315a39d6606 100644
@@ -159,9 +159,13 @@ public:
                        std::placeholders::_11);
        }
 
-       // FFmpegCapture-specific callback that gives the raw audio.
-       typedef std::function<void(const AVPacket *pkt, const AVRational timebase)> audio_callback_t;
-       void set_audio_callback(audio_callback_t callback)
+       // FFmpegCapture-specific callback that gives the raw audio/video.
+       typedef std::function<void(const AVPacket *pkt, const AVRational timebase)> packet_callback_t;
+       void set_video_callback(packet_callback_t callback)
+       {
+               video_callback = callback;
+       }
+       void set_audio_callback(packet_callback_t callback)
        {
                audio_callback = callback;
        }
@@ -297,7 +301,8 @@ private:
        std::unique_ptr<bmusb::FrameAllocator> owned_video_frame_allocator;
        std::unique_ptr<bmusb::FrameAllocator> owned_audio_frame_allocator;
        frame_callback_t frame_callback = nullptr;
-       audio_callback_t audio_callback = nullptr;
+       packet_callback_t video_callback = nullptr;
+       packet_callback_t audio_callback = nullptr;
 
        SwsContextWithDeleter sws_ctx;
        int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
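
For reference, the new video callback is registered the same way as the
existing audio one. A minimal sketch (the source URL, dimensions and the
http_mux pointer are placeholders; the callback body mirrors what
raw_packet_callback in kaeru.cpp does):

  FFmpegCapture video("http://example.com/source.ts", /*width=*/1280, /*height=*/720);
  video.set_video_callback([&](const AVPacket *pkt, const AVRational timebase) {
          // Hand the encoded packet straight to the mux; fall back to pts
          // if the packet carries no dts.
          http_mux->add_packet(*pkt, pkt->pts,
                               pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts,
                               timebase, /*stream_index=*/0);
  });
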
diff --git a/nageru/flags.cpp b/nageru/flags.cpp
index 402e33d7eca7a0be7d05d6ae87a4025f78ad9d0f..769263bb771adfb55667f39170d6dd4492100005 100644
@@ -40,6 +40,7 @@ enum LongOption {
        OPTION_HTTP_PORT,
        OPTION_SRT_PORT,
        OPTION_NO_SRT,
+       OPTION_NO_TRANSCODE_VIDEO,
        OPTION_NO_TRANSCODE_AUDIO,
        OPTION_DISABLE_AUDIO,
        OPTION_FLAT_AUDIO,
@@ -178,6 +179,8 @@ void usage(Program program)
        fprintf(stderr, "                                  (default is %d)\n", DEFAULT_SRT_PORT);
        fprintf(stderr, "      --no-srt                    disable receiving SRT streams\n");
        if (program == PROGRAM_KAERU) {
+               fprintf(stderr, "      --no-transcode-video        copy encoded video raw from the source stream\n");
+               fprintf(stderr, "                                    (experimental, must be H.264)\n");
                fprintf(stderr, "      --no-transcode-audio        copy encoded audio raw from the source stream\n");
                fprintf(stderr, "                                    (requires --http-audio-codec= to be set)\n");
                fprintf(stderr, "      --disable-audio             do not include any audio in the stream\n");
@@ -266,6 +269,7 @@ void parse_flags(Program program, int argc, char * const argv[])
                { "http-port", required_argument, 0, OPTION_HTTP_PORT },
                { "srt-port", required_argument, 0, OPTION_SRT_PORT },
                { "no-srt", no_argument, 0, OPTION_NO_SRT },
+               { "no-transcode-video", no_argument, 0, OPTION_NO_TRANSCODE_VIDEO },
                { "no-transcode-audio", no_argument, 0, OPTION_NO_TRANSCODE_AUDIO },
                { "disable-audio", no_argument, 0, OPTION_DISABLE_AUDIO },
                { "flat-audio", no_argument, 0, OPTION_FLAT_AUDIO },
@@ -392,6 +396,9 @@ void parse_flags(Program program, int argc, char * const argv[])
                case OPTION_NO_SRT:
                        global_flags.srt_port = -1;
                        break;
+               case OPTION_NO_TRANSCODE_VIDEO:
+                       global_flags.transcode_video = false;
+                       break;
                case OPTION_NO_TRANSCODE_AUDIO:
                        global_flags.transcode_audio = false;
                        break;
diff --git a/nageru/flags.h b/nageru/flags.h
index bbe84c254e17c73f6977e983134ffe04f39c1176..dc9c585d0efb5ae718687110b0d9c76683919345 100644
@@ -66,6 +66,7 @@ struct Flags {
        bool ten_bit_input = false;
        bool ten_bit_output = false;  // Implies x264_video_to_disk == true and x264_bit_depth == 10.
        YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS];
+       bool transcode_video = true;  // Kaeru only.
        bool transcode_audio = true;  // Kaeru only.
        bool enable_audio = true;  // Kaeru only. If false, then transcode_audio is also false.
        int x264_bit_depth = 8;  // Not user-settable.
diff --git a/nageru/kaeru.cpp b/nageru/kaeru.cpp
index e4fba7f03630e399f0000c5b42c2ef2e837754f6..9dc39921d93929cc6fb388226b148ac33cdf0bab 100644
@@ -138,9 +138,9 @@ void video_frame_callback(FFmpegCapture *video, X264Encoder *x264_encoder, Audio
        }
 }
 
-void audio_frame_callback(Mux *mux, const AVPacket *pkt, AVRational timebase)
+void raw_packet_callback(Mux *mux, int stream_index, const AVPacket *pkt, AVRational timebase)
 {
-       mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, /*stream_index=*/1);
+       mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
 }
 
 void adjust_bitrate(int signal)
@@ -205,14 +205,20 @@ int main(int argc, char *argv[])
        if (global_flags.transcode_audio) {
                audio_encoder->add_mux(http_mux.get());
        }
-       x264_encoder->add_mux(http_mux.get());
+       if (global_flags.transcode_video) {
+               x264_encoder->add_mux(http_mux.get());
+       }
        global_x264_encoder = x264_encoder.get();
 
        FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
        video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
-       video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+       if (global_flags.transcode_video) {
+               video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+       } else {
+               video.set_video_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/0, _1, _2));
+       }
        if (!global_flags.transcode_audio && global_flags.enable_audio) {
-               video.set_audio_callback(bind(audio_frame_callback, http_mux.get(), _1, _2));
+               video.set_audio_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/1, _1, _2));
        }
        video.configure_card();
        video.start_bm_capture();