return buf_size;
}
+namespace {
+
+// Adds one MJPEG video stream to the mux and fills in its codec
+// parameters. Aborts on allocation failure (setup-time fatal error,
+// matching the other helpers below).
+void add_video_stream(AVFormatContext *avctx)
+{
+ AVStream *stream = avformat_new_stream(avctx, nullptr);
+ if (stream == nullptr) {
+ fprintf(stderr, "avformat_new_stream() failed\n");
+ abort();
+ }
+
+ // FFmpeg is very picky about having audio at 1/48000 timebase,
+ // no matter what we write. Even though we'd prefer our usual 1/120000,
+ // put the video on the same one, so that we can have locked audio.
+ stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+ stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+
+ // Used for aspect ratio only. Can change without notice (the mux won't care).
+ stream->codecpar->width = global_flags.width;
+ stream->codecpar->height = global_flags.height;
+
+ // TODO: We could perhaps use the interpretation for each card here
+ // (or at least the command-line flags) instead of the defaults,
+ // but what would we do when they change?
+ // NOTE(review): the transfer characteristic is sRGB (IEC 61966-2-1)
+ // while primaries/matrix are BT.709 — presumably intentional for
+ // JPEG output; confirm.
+ stream->codecpar->color_primaries = AVCOL_PRI_BT709;
+ stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
+ stream->codecpar->color_space = AVCOL_SPC_BT709;
+ stream->codecpar->color_range = AVCOL_RANGE_MPEG;
+ stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
+ stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+}
+
+// Adds one raw PCM (s32le, stereo, OUTPUT_FREQUENCY Hz) audio stream to
+// the mux. Aborts on allocation failure, same as add_video_stream().
+void add_audio_stream(AVFormatContext *avctx)
+{
+ AVStream *stream = avformat_new_stream(avctx, nullptr);
+ if (stream == nullptr) {
+ fprintf(stderr, "avformat_new_stream() failed\n");
+ abort();
+ }
+ stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+ stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+ // NOTE(review): channel_layout/channels are deprecated in FFmpeg >= 5.1
+ // in favor of ch_layout — fine if the project targets older libavformat;
+ // confirm against the pinned FFmpeg version.
+ stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+ stream->codecpar->channels = 2;
+ stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
+}
+
+// Applies the MUX_OPTS option set and writes the mux header.
+// Aborts on failure, consistent with the other setup helpers.
+void finalize_mux(AVFormatContext *avctx)
+{
+ AVDictionary *options = NULL;
+ vector<pair<string, string>> opts = MUX_OPTS;
+ for (const pair<string, string> &opt : opts) { // const ref: avoid copying each pair
+ av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
+ }
+ if (avformat_write_header(avctx, &options) < 0) {
+ fprintf(stderr, "avformat_write_header() failed\n");
+ abort();
+ }
+ // avformat_write_header() leaves unrecognized options behind in the
+ // dictionary; free it so they do not leak.
+ av_dict_free(&options);
+}
+
+} // namespace
+
MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
: httpd(httpd)
{
avctx->flags = AVFMT_FLAG_CUSTOM_IO;
for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
- AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
- if (stream == nullptr) {
- fprintf(stderr, "avformat_new_stream() failed\n");
- abort();
- }
-
- // FFmpeg is very picky about having audio at 1/48000 timebase,
- // no matter what we write. Even though we'd prefer our usual 1/120000,
- // put the video on the same one, so that we can have locked audio.
- stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
- stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
- stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
-
- // Used for aspect ratio only. Can change without notice (the mux won't care).
- stream->codecpar->width = global_flags.width;
- stream->codecpar->height = global_flags.height;
-
- // TODO: We could perhaps use the interpretation for each card here
- // (or at least the command-line flags) instead of the defaults,
- // but what would we do when they change?
- stream->codecpar->color_primaries = AVCOL_PRI_BT709;
- stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
- stream->codecpar->color_space = AVCOL_SPC_BT709;
- stream->codecpar->color_range = AVCOL_RANGE_MPEG;
- stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
- stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+ // One video stream per exported card (stream setup extracted to a helper).
+ add_video_stream(avctx.get());
}
for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
- AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
- if (stream == nullptr) {
- fprintf(stderr, "avformat_new_stream() failed\n");
- abort();
- }
- stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
- stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
- stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
- stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
- stream->codecpar->channels = 2;
- stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
- }
-
- AVDictionary *options = NULL;
- vector<pair<string, string>> opts = MUX_OPTS;
- for (pair<string, string> opt : opts) {
- av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
- }
- if (avformat_write_header(avctx.get(), &options) < 0) {
- fprintf(stderr, "avformat_write_header() failed\n");
- abort();
+ // Matching audio stream per exported card (same helper pattern).
+ add_audio_stream(avctx.get());
}
+ // All streams are in place; write the mux header now (aborts on failure).
+ finalize_mux(avctx.get());
// Initialize VA-API.
string error;
any_frames_to_be_encoded.notify_all();
}
-int MJPEGEncoder::get_mjpeg_stream_for_card(unsigned card_index)
+// Returns whether frames from this card should be MJPEG-encoded at all:
+// at least one multicam client must be connected, and the card must be
+// configured in global_flags.card_to_mjpeg_stream_export. (Unlike the old
+// get_mjpeg_stream_for_card(), the stream index is looked up separately
+// by the callers.)
+bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
{
// Only bother doing MJPEG encoding if there are any connected clients
// that want the stream.
if (httpd->get_num_connected_multicam_clients() == 0) {
- return -1;
+ return false;
}
auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
- if (it == global_flags.card_to_mjpeg_stream_export.end()) {
- return -1;
- }
- return it->second;
+ return (it != global_flags.card_to_mjpeg_stream_export.end());
}
void MJPEGEncoder::encoder_thread_func()
frames_to_be_encoded.pop();
}
+ // Map the card index to its mux stream index via the export map;
+ // the card index itself is no longer used as the stream index.
+ assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index)); // Or should_encode_mjpeg_for_card() would have returned false.
+ int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];
+
if (va_dpy != nullptr) {
// Will call back in the receiver thread.
encode_jpeg_va(move(qf));
} else {
// Write audio before video, since Futatabi expects it.
if (qf.audio.size() > 0) {
- write_audio_packet(qf.pts, qf.card_index, qf.audio);
+ write_audio_packet(qf.pts, stream_index, qf.audio);
}
// Encode synchronously, in the same thread.
vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
- write_mjpeg_packet(qf.pts, qf.card_index, jpeg.data(), jpeg.size());
+ write_mjpeg_packet(qf.pts, stream_index, jpeg.data(), jpeg.size());
}
}
frames_encoding.pop();
}
+ // Same card-to-stream-index mapping as in encoder_thread_func().
+ assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index)); // Or should_encode_mjpeg_for_card() would have returned false.
+ int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];
+
// Write audio before video, since Futatabi expects it.
if (qf.audio.size() > 0) {
- write_audio_packet(qf.pts, qf.card_index, qf.audio);
+ write_audio_packet(qf.pts, stream_index, qf.audio);
}
VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
+ // NOTE(review): pre-existing — the CHECK below says "vaMapBuffer" but the
+ // call above is vaSyncSurface; looks like a copy-paste, worth a follow-up fix.
CHECK_VASTATUS(va_status, "vaMapBuffer");
const uint8_t *coded_buf = reinterpret_cast<uint8_t *>(segment->buf);
- write_mjpeg_packet(qf.pts, qf.card_index, coded_buf, segment->size);
+ write_mjpeg_packet(qf.pts, stream_index, coded_buf, segment->size);
va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
CHECK_VASTATUS(va_status, "vaUnmapBuffer");