X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=nageru%2Fmjpeg_encoder.cpp;fp=nageru%2Fmjpeg_encoder.cpp;h=19ffd58719aa151c01144d850cc987799cd175ed;hb=bcd177e1daf5a63d7bf877bc5d30d8803dfd472c;hp=a39f1c8adc1a5be2bc11760987fce7457ad0e8fb;hpb=d9ddc0c342d6145c14eef021a58d8969043e24e4;p=nageru

diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index a39f1c8..19ffd58 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -1,5 +1,6 @@
 #include "mjpeg_encoder.h"
 
+#include <assert.h>
 #include <jpeglib.h>
 #include <unistd.h>
 #if __SSE2__
@@ -102,17 +103,18 @@ static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
 
 int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
 {
-	MJPEGEncoder *engine = (MJPEGEncoder *)opaque;
-	return engine->write_packet2(buf, buf_size, type, time);
+	WritePacket2Context *ctx = (WritePacket2Context *)opaque;
+	return ctx->mjpeg_encoder->write_packet2(ctx->stream_id, buf, buf_size, type, time);
 }
 
-int MJPEGEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+int MJPEGEncoder::write_packet2(HTTPD::StreamID stream_id, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
 {
+	string *mux_header = &streams[stream_id].mux_header;
 	if (type == AVIO_DATA_MARKER_HEADER) {
-		mux_header.append((char *)buf, buf_size);
-		httpd->set_header(HTTPD::MULTICAM_STREAM, mux_header);
+		mux_header->append((char *)buf, buf_size);
+		httpd->set_header(stream_id, *mux_header);
 	} else {
-		httpd->add_data(HTTPD::MULTICAM_STREAM, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
+		httpd->add_data(stream_id, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
 	}
 	return buf_size;
 }
 
@@ -182,24 +184,12 @@ void finalize_mux(AVFormatContext *avctx)
 MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 	: httpd(httpd)
 {
-	// Set up the mux. We don't use the Mux wrapper, because it's geared towards
-	// a situation with only one video stream (and possibly one audio stream)
-	// with known width/height, and we don't need the extra functionality it provides.
-	avctx.reset(avformat_alloc_context());
-	avctx->oformat = av_guess_format("nut", nullptr, nullptr);
-
-	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
-	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
-	avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
-	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
-	for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
-		add_video_stream(avctx.get());
+	create_ffmpeg_context(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
+	for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
+		create_ffmpeg_context(HTTPD::StreamID{ HTTPD::SIPHON_STREAM, stream_idx });
 	}
-	for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
-		add_audio_stream(avctx.get());
-	}
-	finalize_mux(avctx.get());
+
+	add_stream(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
 
 	// Initialize VA-API.
 	string error;
@@ -225,7 +215,9 @@ MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
 
 MJPEGEncoder::~MJPEGEncoder()
 {
-	av_free(avctx->pb->buffer);
+	for (auto &id_and_stream : streams) {
+		av_free(id_and_stream.second.avctx->pb->buffer);
+	}
 
 	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
 	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
@@ -347,7 +339,8 @@ bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
 {
 	// Only bother doing MJPEG encoding if there are any connected clients
 	// that want the stream.
-	if (httpd->get_num_connected_multicam_clients() == 0) {
+	if (httpd->get_num_connected_multicam_clients() == 0 &&
+	    httpd->get_num_connected_siphon_clients(card_index) == 0) {
 		return false;
 	}
 
@@ -380,14 +373,26 @@ void MJPEGEncoder::encoder_thread_func()
 			// Will call back in the receiver thread.
 			encode_jpeg_va(move(qf));
 		} else {
+			update_siphon_streams();
+
+			HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
+			HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
+			assert(streams.count(multicam_id));
+
 			// Write audio before video, since Futatabi expects it.
 			if (qf.audio.size() > 0) {
-				write_audio_packet(qf.pts, stream_index, qf.audio);
+				write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
+				if (streams.count(siphon_id)) {
+					write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
+				}
 			}
 
 			// Encode synchronously, in the same thread.
 			vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
-			write_mjpeg_packet(qf.pts, stream_index, jpeg.data(), jpeg.size());
+			write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, jpeg.data(), jpeg.size());
+			if (streams.count(siphon_id)) {
+				write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, jpeg.data(), jpeg.size());
+			}
 		}
 	}
 
@@ -397,40 +402,40 @@ void MJPEGEncoder::encoder_thread_func()
 	free(tmp_cr);
 }
 
-void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const uint8_t *jpeg, size_t jpeg_size)
+void MJPEGEncoder::write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size)
 {
 	AVPacket pkt;
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.buf = nullptr;
 	pkt.data = const_cast<uint8_t *>(jpeg);
 	pkt.size = jpeg_size;
-	pkt.stream_index = card_index;
+	pkt.stream_index = stream_index;
 	pkt.flags = AV_PKT_FLAG_KEY;
 	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
 	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
 	pkt.duration = 0;
 
-	if (av_write_frame(avctx.get(), &pkt) < 0) {
+	if (av_write_frame(avctx, &pkt) < 0) {
 		fprintf(stderr, "av_write_frame() failed\n");
 		abort();
 	}
 }
 
-void MJPEGEncoder::write_audio_packet(int64_t pts, unsigned card_index, const vector<int32_t> &audio)
+void MJPEGEncoder::write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const vector<int32_t> &audio)
 {
 	AVPacket pkt;
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.buf = nullptr;
 	pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
 	pkt.size = audio.size() * sizeof(audio[0]);
-	pkt.stream_index = card_index + global_flags.card_to_mjpeg_stream_export.size();
+	pkt.stream_index = stream_index;
 	pkt.flags = AV_PKT_FLAG_KEY;
 	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
 	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
 	size_t num_stereo_samples = audio.size() / 2;
 	pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);
 
-	if (av_write_frame(avctx.get(), &pkt) < 0) {
+	if (av_write_frame(avctx, &pkt) < 0) {
 		fprintf(stderr, "av_write_frame() failed\n");
 		abort();
 	}
@@ -890,12 +895,22 @@ void MJPEGEncoder::va_receiver_thread_func()
 			frames_encoding.pop();
 		}
 
+		update_siphon_streams();
+
 		assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
 		int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];
 
+		HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
+		HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
+		assert(streams.count(multicam_id));
+		assert(streams[multicam_id].avctx != nullptr);
+
 		// Write audio before video, since Futatabi expects it.
 		if (qf.audio.size() > 0) {
-			write_audio_packet(qf.pts, stream_index, qf.audio);
+			write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
+			if (streams.count(siphon_id)) {
+				write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
+			}
 		}
 
 		VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
@@ -906,7 +921,10 @@ void MJPEGEncoder::va_receiver_thread_func()
 		CHECK_VASTATUS(va_status, "vaMapBuffer");
 		const uint8_t *coded_buf = reinterpret_cast<const uint8_t *>(segment->buf);
 
-		write_mjpeg_packet(qf.pts, stream_index, coded_buf, segment->size);
+		write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, coded_buf, segment->size);
+		if (streams.count(siphon_id)) {
+			write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, coded_buf, segment->size);
+		}
 
 		va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
 		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
@@ -943,3 +961,65 @@ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
 
 	return dest.dest;
 }
+
+void MJPEGEncoder::add_stream(HTTPD::StreamID stream_id)
+{
+	AVFormatContextWithCloser avctx;
+
+	// Set up the mux. We don't use the Mux wrapper, because it's geared towards
+	// a situation with only one video stream (and possibly one audio stream)
+	// with known width/height, and we don't need the extra functionality it provides.
+	avctx.reset(avformat_alloc_context());
+	avctx->oformat = av_guess_format("nut", nullptr, nullptr);
+
+	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, &ffmpeg_contexts[stream_id], nullptr, nullptr, nullptr);
+	avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
+	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+	if (stream_id.type == HTTPD::MULTICAM_STREAM) {
+		for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+			add_video_stream(avctx.get());
+		}
+		for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+			add_audio_stream(avctx.get());
+		}
+	} else {
+		assert(stream_id.type == HTTPD::SIPHON_STREAM);
+		add_video_stream(avctx.get());
+		add_audio_stream(avctx.get());
+	}
+	finalize_mux(avctx.get());
+
+	Stream s;
+	s.avctx = move(avctx);
+	streams[stream_id] = move(s);
+}
+
+void MJPEGEncoder::update_siphon_streams()
+{
+	// Bring the list of streams into sync with what the clients need.
+	for (auto it = streams.begin(); it != streams.end(); ) {
+		if (it->first.type != HTTPD::SIPHON_STREAM) {
+			++it;
+			continue;
+		}
+		if (httpd->get_num_connected_siphon_clients(it->first.index) == 0) {
+			av_free(it->second.avctx->pb->buffer);
+			streams.erase(it++);
+		} else {
+			++it;
+		}
+	}
+	for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
+		HTTPD::StreamID stream_id{ HTTPD::SIPHON_STREAM, stream_idx };
+		if (streams.count(stream_id) == 0 && httpd->get_num_connected_siphon_clients(stream_idx) > 0) {
+			add_stream(stream_id);
+		}
+	}
+}
+
+void MJPEGEncoder::create_ffmpeg_context(HTTPD::StreamID stream_id)
+{
+	ffmpeg_contexts.emplace(stream_id, WritePacket2Context{ this, stream_id });
+}
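
A note on the pattern this patch introduces: an avio write callback only gets a single opaque pointer back from the mux, so once there is more than one mux (the multicam mux plus up to MAX_VIDEO_CARDS siphon muxes), that pointer has to identify the stream as well as the encoder — hence WritePacket2Context. Below is a minimal standalone sketch of that dispatch; StreamID, Encoder, and write_thunk are simplified stand-ins for illustration, not Nageru's real types, and the callback signature is reduced from the avio one to its essentials.

// Standalone sketch (assumptions, not Nageru code): the opaque pointer
// handed to each mux carries both the encoder and the stream identity,
// so one static thunk can serve any number of muxes.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

// Simplified stand-in for HTTPD::StreamID.
struct StreamID {
	int type;        // e.g. MULTICAM_STREAM = 0, SIPHON_STREAM = 1
	unsigned index;  // card index for siphon streams
	bool operator<(const StreamID &other) const
	{
		return std::tie(type, index) < std::tie(other.type, other.index);
	}
};

class Encoder;

// Mirrors the patch's WritePacket2Context: encoder plus stream identity.
struct WritePacket2Context {
	Encoder *encoder;
	StreamID stream_id;
};

class Encoder {
public:
	// Shaped like an avio write callback: the mux hands back the opaque
	// pointer it was constructed with, and we route per stream.
	static int write_thunk(void *opaque, uint8_t *buf, int buf_size)
	{
		WritePacket2Context *ctx = static_cast<WritePacket2Context *>(opaque);
		return ctx->encoder->write(ctx->stream_id, buf, buf_size);
	}

	int write(StreamID stream_id, uint8_t *buf, int buf_size)
	{
		// Each stream keeps its own buffer, replacing the single
		// shared mux_header of the old code.
		buffers[stream_id].append(reinterpret_cast<char *>(buf), buf_size);
		return buf_size;
	}

private:
	std::map<StreamID, std::string> buffers;
};

int main()
{
	Encoder encoder;
	// One context per stream, created up front; their addresses must
	// stay stable for as long as the muxes hold them.
	WritePacket2Context multicam{ &encoder, { 0, 0 } };
	WritePacket2Context siphon{ &encoder, { 1, 3 } };

	uint8_t header[] = "nut header";
	// The same thunk serves both muxes; the opaque pointer decides
	// which stream the bytes land in.
	Encoder::write_thunk(&multicam, header, sizeof(header));
	Encoder::write_thunk(&siphon, header, sizeof(header));
	std::printf("wrote %zu bytes across 2 streams\n", sizeof(header) * 2);
}

This is presumably also why create_ffmpeg_context() fills ffmpeg_contexts eagerly in the constructor for every possible stream ID: the opaque pointer passed to avio_alloc_context() has to stay valid and stable for the lifetime of the mux, so the contexts are created once in a node-based map (whose elements never move) and survive any add_stream()/update_siphon_streams() churn, rather than being created and destroyed alongside the siphon streams themselves.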