This is useful primarily if you want Kaeru to rewrap the stream into
Metacube (for Cubemap) and do nothing else with it. Only H.264
is supported for now, since the rest of the pipeline assumes that codec.
Currently, we only really support --http-mux=mpegts; the other muxes seem
to have issues.
memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height); // Valid for both NV12 and planar.
}
memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height); // Valid for both NV12 and planar.
}
- frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
- video_frame, /*video_offset=*/0, video_format,
- FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+ // frame_callback may legitimately be unset (it defaults to nullptr): in
+ // Kaeru's --no-transcode-video mode, only the raw video_callback is wired
+ // up, so guard before delivering the synthetic "disconnected" frame.
+ if (frame_callback != nullptr) {
+ frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
+ video_frame, /*video_offset=*/0, video_format,
+ FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+ }
last_frame_was_connected = false;
}
last_frame_was_connected = false;
}
timecode += MAX_FPS * 2 + 1;
}
last_neutral_color = get_neutral_color(frame->metadata);
timecode += MAX_FPS * 2 + 1;
}
last_neutral_color = get_neutral_color(frame->metadata);
- frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
- video_frame.get_and_release(), 0, video_format,
- audio_frame.get_and_release(), 0, audio_format);
+ // Skip decoded A/V frame delivery entirely when no frame callback is
+ // registered (raw pass-through mode). Note the timecode++ is inside the
+ // guard, so the timecode only advances when a frame is actually delivered.
+ if (frame_callback != nullptr) {
+ frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
+ video_frame.get_and_release(), 0, video_format,
+ audio_frame.get_and_release(), 0, audio_format);
+ }
first_frame = false;
last_frame = steady_clock::now();
last_frame_was_connected = true;
first_frame = false;
last_frame = steady_clock::now();
last_frame_was_connected = true;
if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
}
if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
}
+ // Raw pass-through: hand the still-encoded video packet (with its stream's
+ // timebase) to the registered video callback, mirroring the audio dispatch
+ // above. The packet is also still sent to the decoder below.
+ if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
+ video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
+ }
if (pkt.stream_index == video_stream_index) {
if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
if (pkt.stream_index == video_stream_index) {
if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
std::placeholders::_11);
}
std::placeholders::_11);
}
- // FFmpegCapture-specific callback that gives the raw audio.
- typedef std::function<void(const AVPacket *pkt, const AVRational timebase)> audio_callback_t;
- void set_audio_callback(audio_callback_t callback)
+ // FFmpegCapture-specific callback that gives the raw audio/video.
+ typedef std::function<void(const AVPacket *pkt, const AVRational timebase)> packet_callback_t;
+ // Register a callback that receives undecoded video packets straight from
+ // the demuxer (used by Kaeru's --no-transcode-video pass-through mode).
+ void set_video_callback(packet_callback_t callback)
+ {
+ video_callback = callback;
+ }
+ // Register a callback that receives undecoded audio packets from the demuxer.
+ void set_audio_callback(packet_callback_t callback)
{
audio_callback = callback;
}
{
audio_callback = callback;
}
std::unique_ptr<bmusb::FrameAllocator> owned_video_frame_allocator;
std::unique_ptr<bmusb::FrameAllocator> owned_audio_frame_allocator;
frame_callback_t frame_callback = nullptr;
std::unique_ptr<bmusb::FrameAllocator> owned_video_frame_allocator;
std::unique_ptr<bmusb::FrameAllocator> owned_audio_frame_allocator;
frame_callback_t frame_callback = nullptr;
- audio_callback_t audio_callback = nullptr;
+ packet_callback_t video_callback = nullptr;
+ packet_callback_t audio_callback = nullptr;
SwsContextWithDeleter sws_ctx;
int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
SwsContextWithDeleter sws_ctx;
int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
OPTION_HTTP_PORT,
OPTION_SRT_PORT,
OPTION_NO_SRT,
OPTION_HTTP_PORT,
OPTION_SRT_PORT,
OPTION_NO_SRT,
+ OPTION_NO_TRANSCODE_VIDEO,
OPTION_NO_TRANSCODE_AUDIO,
OPTION_DISABLE_AUDIO,
OPTION_FLAT_AUDIO,
OPTION_NO_TRANSCODE_AUDIO,
OPTION_DISABLE_AUDIO,
OPTION_FLAT_AUDIO,
fprintf(stderr, " (default is %d)\n", DEFAULT_SRT_PORT);
fprintf(stderr, " --no-srt disable receiving SRT streams\n");
if (program == PROGRAM_KAERU) {
fprintf(stderr, " (default is %d)\n", DEFAULT_SRT_PORT);
fprintf(stderr, " --no-srt disable receiving SRT streams\n");
if (program == PROGRAM_KAERU) {
+ fprintf(stderr, " --no-transcode-video copy encoded video raw from the source stream\n");
+ fprintf(stderr, " (experimental, must be H.264)\n");
fprintf(stderr, " --no-transcode-audio copy encoded audio raw from the source stream\n");
fprintf(stderr, " (requires --http-audio-codec= to be set)\n");
fprintf(stderr, " --disable-audio do not include any audio in the stream\n");
fprintf(stderr, " --no-transcode-audio copy encoded audio raw from the source stream\n");
fprintf(stderr, " (requires --http-audio-codec= to be set)\n");
fprintf(stderr, " --disable-audio do not include any audio in the stream\n");
{ "http-port", required_argument, 0, OPTION_HTTP_PORT },
{ "srt-port", required_argument, 0, OPTION_SRT_PORT },
{ "no-srt", no_argument, 0, OPTION_NO_SRT },
{ "http-port", required_argument, 0, OPTION_HTTP_PORT },
{ "srt-port", required_argument, 0, OPTION_SRT_PORT },
{ "no-srt", no_argument, 0, OPTION_NO_SRT },
+ { "no-transcode-video", no_argument, 0, OPTION_NO_TRANSCODE_VIDEO },
{ "no-transcode-audio", no_argument, 0, OPTION_NO_TRANSCODE_AUDIO },
{ "disable-audio", no_argument, 0, OPTION_DISABLE_AUDIO },
{ "flat-audio", no_argument, 0, OPTION_FLAT_AUDIO },
{ "no-transcode-audio", no_argument, 0, OPTION_NO_TRANSCODE_AUDIO },
{ "disable-audio", no_argument, 0, OPTION_DISABLE_AUDIO },
{ "flat-audio", no_argument, 0, OPTION_FLAT_AUDIO },
case OPTION_NO_SRT:
global_flags.srt_port = -1;
break;
case OPTION_NO_SRT:
global_flags.srt_port = -1;
break;
+ case OPTION_NO_TRANSCODE_VIDEO:
+ global_flags.transcode_video = false;
+ break;
case OPTION_NO_TRANSCODE_AUDIO:
global_flags.transcode_audio = false;
break;
case OPTION_NO_TRANSCODE_AUDIO:
global_flags.transcode_audio = false;
break;
bool ten_bit_input = false;
bool ten_bit_output = false; // Implies x264_video_to_disk == true and x264_bit_depth == 10.
YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS];
bool ten_bit_input = false;
bool ten_bit_output = false; // Implies x264_video_to_disk == true and x264_bit_depth == 10.
YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS];
+ bool transcode_video = true; // Kaeru only.
bool transcode_audio = true; // Kaeru only.
bool enable_audio = true; // Kaeru only. If false, then transcode_audio is also false.
int x264_bit_depth = 8; // Not user-settable.
bool transcode_audio = true; // Kaeru only.
bool enable_audio = true; // Kaeru only. If false, then transcode_audio is also false.
int x264_bit_depth = 8; // Not user-settable.
-void audio_frame_callback(Mux *mux, const AVPacket *pkt, AVRational timebase)
+void raw_packet_callback(Mux *mux, int stream_index, const AVPacket *pkt, AVRational timebase)
- mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, /*stream_index=*/1);
+ mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
}
void adjust_bitrate(int signal)
}
void adjust_bitrate(int signal)
if (global_flags.transcode_audio) {
audio_encoder->add_mux(http_mux.get());
}
if (global_flags.transcode_audio) {
audio_encoder->add_mux(http_mux.get());
}
- x264_encoder->add_mux(http_mux.get());
+ // Only feed x264 output into the HTTP mux when we are actually transcoding;
+ // in --no-transcode-video mode the source's encoded packets are remuxed
+ // directly instead (see raw_packet_callback).
+ if (global_flags.transcode_video) {
+ x264_encoder->add_mux(http_mux.get());
+ }
global_x264_encoder = x264_encoder.get();
FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
global_x264_encoder = x264_encoder.get();
FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
- video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+ // Transcoding: decoded frames are pushed through the x264 encoder via
+ // video_frame_callback. Pass-through: encoded video packets from the source
+ // are remuxed straight into HTTP mux stream 0, bypassing decode/encode.
+ if (global_flags.transcode_video) {
+ video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
+ } else {
+ video.set_video_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/0, _1, _2));
+ }
if (!global_flags.transcode_audio && global_flags.enable_audio) {
if (!global_flags.transcode_audio && global_flags.enable_audio) {
- video.set_audio_callback(bind(audio_frame_callback, http_mux.get(), _1, _2));
+ video.set_audio_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/1, _1, _2));
}
video.configure_card();
video.start_bm_capture();
}
video.configure_card();
video.start_bm_capture();