From 410a5fadcba336c918843cd55e1516cf4fe56abc Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Sun, 17 Apr 2016 21:37:47 +0200
Subject: [PATCH] Pull the Mux class out of HTTPD. (First step towards
 decoupling file and HTTP muxing.)

---
 Makefile  |   2 +-
 httpd.cpp | 104 ------------------------------------------------
 httpd.h   |  18 +--------
 mux.cpp   | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mux.h     |  29 ++++++++++++++
 5 files changed, 147 insertions(+), 121 deletions(-)
 create mode 100644 mux.cpp
 create mode 100644 mux.h

diff --git a/Makefile b/Makefile
index d19f064..31e0841 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ OBJS=glwidget.o main.o mainwindow.o vumeter.o lrameter.o vu_common.o correlation
 OBJS += glwidget.moc.o mainwindow.moc.o vumeter.moc.o lrameter.moc.o correlation_meter.moc.o aboutdialog.moc.o
 
 # Mixer objects
-OBJS += h264encode.o mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o httpd.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
+OBJS += h264encode.o mixer.o bmusb/bmusb.o pbo_frame_allocator.o context.o ref_counted_frame.o theme.o resampling_queue.o httpd.o mux.o ebu_r128_proc.o flags.o image_input.o stereocompressor.o filter.o alsa_output.o correlation_measurer.o
 
 # DeckLink
 OBJS += decklink_capture.o decklink/DeckLinkAPIDispatch.o
diff --git a/httpd.cpp b/httpd.cpp
index f7ae6a0..7375e8a 100644
--- a/httpd.cpp
+++ b/httpd.cpp
@@ -140,110 +140,6 @@ void HTTPD::request_completed(struct MHD_Connection *connection, void **con_cls,
 	}
 }
 
-HTTPD::Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base)
-	: avctx(avctx)
-{
-	AVCodec *codec_video = avcodec_find_encoder((video_codec == CODEC_H264) ? AV_CODEC_ID_H264 : AV_CODEC_ID_RAWVIDEO);
-	avstream_video = avformat_new_stream(avctx, codec_video);
-	if (avstream_video == nullptr) {
-		fprintf(stderr, "avformat_new_stream() failed\n");
-		exit(1);
-	}
-	avstream_video->time_base = AVRational{1, time_base};
-	avstream_video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
-	if (video_codec == CODEC_H264) {
-		avstream_video->codec->codec_id = AV_CODEC_ID_H264;
-	} else {
-		assert(video_codec == CODEC_NV12);
-		avstream_video->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
-		avstream_video->codec->codec_tag = avcodec_pix_fmt_to_codec_tag(AV_PIX_FMT_NV12);
-	}
-	avstream_video->codec->width = width;
-	avstream_video->codec->height = height;
-	avstream_video->codec->time_base = AVRational{1, time_base};
-	avstream_video->codec->ticks_per_frame = 1; // or 2?
-
-	// Colorspace details. Closely correspond to settings in EffectChain_finalize,
-	// as noted in each comment.
-	// Note that the H.264 stream also contains this information and depending on the
-	// mux, this might simply get ignored. See sps_rbsp().
-	avstream_video->codec->color_primaries = AVCOL_PRI_BT709; // RGB colorspace (inout_format.color_space).
-	avstream_video->codec->color_trc = AVCOL_TRC_UNSPECIFIED; // Gamma curve (inout_format.gamma_curve).
-	avstream_video->codec->colorspace = AVCOL_SPC_SMPTE170M; // YUV colorspace (output_ycbcr_format.luma_coefficients).
-	avstream_video->codec->color_range = AVCOL_RANGE_MPEG; // Full vs. limited range (output_ycbcr_format.full_range).
-	avstream_video->codec->chroma_sample_location = AVCHROMA_LOC_LEFT; // Chroma sample location. See chroma_offset_0[] in Mixer::subsample_chroma().
-	avstream_video->codec->field_order = AV_FIELD_PROGRESSIVE;
-	if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
-		avstream_video->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
-	}
-
-	AVCodec *codec_audio = avcodec_find_encoder_by_name(AUDIO_OUTPUT_CODEC_NAME);
-	if (codec_audio == nullptr) {
-		fprintf(stderr, "ERROR: Could not find codec '%s'\n", AUDIO_OUTPUT_CODEC_NAME);
-		exit(1);
-	}
-	avstream_audio = avformat_new_stream(avctx, codec_audio);
-	if (avstream_audio == nullptr) {
-		fprintf(stderr, "avformat_new_stream() failed\n");
-		exit(1);
-	}
-	avstream_audio->time_base = AVRational{1, time_base};
-	avstream_audio->codec->bit_rate = AUDIO_OUTPUT_BIT_RATE;
-	avstream_audio->codec->sample_rate = OUTPUT_FREQUENCY;
-	avstream_audio->codec->channels = 2;
-	avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
-	avstream_audio->codec->time_base = AVRational{1, time_base};
-	if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
-		avstream_audio->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
-	}
-
-	AVDictionary *options = NULL;
-	vector<pair<string, string>> opts = MUX_OPTS;
-	for (pair<string, string> opt : opts) {
-		av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
-	}
-	if (avformat_write_header(avctx, &options) < 0) {
-		fprintf(stderr, "avformat_write_header() failed\n");
-		exit(1);
-	}
-}
-
-HTTPD::Mux::~Mux()
-{
-	av_write_trailer(avctx);
-	av_free(avctx->pb->buffer);
-	av_free(avctx->pb);
-	avformat_free_context(avctx);
-}
-
-void HTTPD::Mux::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
-{
-	if (!seen_keyframe && !(pkt.stream_index == 0 && (pkt.flags & AV_PKT_FLAG_KEY))) {
-		// Wait until we see the first (video) key frame.
-		return;
-	}
-	seen_keyframe = true;
-
-	AVPacket pkt_copy;
-	av_copy_packet(&pkt_copy, &pkt);
-	if (pkt.stream_index == 0) {
-		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-	} else if (pkt.stream_index == 1) {
-		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-	} else {
-		assert(false);
-	}
-
-	if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
-		fprintf(stderr, "av_interleaved_write_frame() failed\n");
-		exit(1);
-	}
-
-	av_packet_unref(&pkt_copy);
-}
-
 HTTPD::Stream::Stream(AVOutputFormat *oformat, int width, int height, int time_base)
 {
 	AVFormatContext *avctx = avformat_alloc_context();
diff --git a/httpd.h b/httpd.h
index e6f12d9..d5c805c 100644
--- a/httpd.h
+++ b/httpd.h
@@ -27,6 +27,8 @@ extern "C" {
 #include 
 }
 
+#include "mux.h"
+
 class HTTPD {
 public:
 	enum PacketDestination {
@@ -60,22 +62,6 @@ private:
 	void request_completed(struct MHD_Connection *connection, void **con_cls, enum MHD_RequestTerminationCode toe);
 
-	class Mux {
-	public:
-		enum Codec {
-			CODEC_H264,
-			CODEC_NV12, // Uncompressed 4:2:0.
-		};
-
-		Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base); // Takes ownership of avctx.
-		~Mux();
-		void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
-
-	private:
-		bool seen_keyframe = false;
-		AVFormatContext *avctx;
-		AVStream *avstream_video, *avstream_audio;
-	};
 
 	class Stream {
 	public:
diff --git a/mux.cpp b/mux.cpp
new file mode 100644
index 0000000..82d42ab
--- /dev/null
+++ b/mux.cpp
@@ -0,0 +1,115 @@
+#include <assert.h>
+
+#include <string>
+#include <vector>
+
+#include "defs.h"
+#include "mux.h"
+#include "timebase.h"
+
+using namespace std;
+
+Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base)
+	: avctx(avctx)
+{
+	AVCodec *codec_video = avcodec_find_encoder((video_codec == CODEC_H264) ? AV_CODEC_ID_H264 : AV_CODEC_ID_RAWVIDEO);
+	avstream_video = avformat_new_stream(avctx, codec_video);
+	if (avstream_video == nullptr) {
+		fprintf(stderr, "avformat_new_stream() failed\n");
+		exit(1);
+	}
+	avstream_video->time_base = AVRational{1, time_base};
+	avstream_video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+	if (video_codec == CODEC_H264) {
+		avstream_video->codec->codec_id = AV_CODEC_ID_H264;
+	} else {
+		assert(video_codec == CODEC_NV12);
+		avstream_video->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+		avstream_video->codec->codec_tag = avcodec_pix_fmt_to_codec_tag(AV_PIX_FMT_NV12);
+	}
+	avstream_video->codec->width = width;
+	avstream_video->codec->height = height;
+	avstream_video->codec->time_base = AVRational{1, time_base};
+	avstream_video->codec->ticks_per_frame = 1; // or 2?
+
+	// Colorspace details. Closely correspond to settings in EffectChain_finalize,
+	// as noted in each comment.
+	// Note that the H.264 stream also contains this information and depending on the
+	// mux, this might simply get ignored. See sps_rbsp().
+	avstream_video->codec->color_primaries = AVCOL_PRI_BT709; // RGB colorspace (inout_format.color_space).
+	avstream_video->codec->color_trc = AVCOL_TRC_UNSPECIFIED; // Gamma curve (inout_format.gamma_curve).
+	avstream_video->codec->colorspace = AVCOL_SPC_SMPTE170M; // YUV colorspace (output_ycbcr_format.luma_coefficients).
+	avstream_video->codec->color_range = AVCOL_RANGE_MPEG; // Full vs. limited range (output_ycbcr_format.full_range).
+	avstream_video->codec->chroma_sample_location = AVCHROMA_LOC_LEFT; // Chroma sample location. See chroma_offset_0[] in Mixer::subsample_chroma().
+	avstream_video->codec->field_order = AV_FIELD_PROGRESSIVE;
+	if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
+		avstream_video->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
+	}
+
+	AVCodec *codec_audio = avcodec_find_encoder_by_name(AUDIO_OUTPUT_CODEC_NAME);
+	if (codec_audio == nullptr) {
+		fprintf(stderr, "ERROR: Could not find codec '%s'\n", AUDIO_OUTPUT_CODEC_NAME);
+		exit(1);
+	}
+	avstream_audio = avformat_new_stream(avctx, codec_audio);
+	if (avstream_audio == nullptr) {
+		fprintf(stderr, "avformat_new_stream() failed\n");
+		exit(1);
+	}
+	avstream_audio->time_base = AVRational{1, time_base};
+	avstream_audio->codec->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+	avstream_audio->codec->sample_rate = OUTPUT_FREQUENCY;
+	avstream_audio->codec->channels = 2;
+	avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
+	avstream_audio->codec->time_base = AVRational{1, time_base};
+	if (avctx->oformat->flags & AVFMT_GLOBALHEADER) {
+		avstream_audio->codec->flags = AV_CODEC_FLAG_GLOBAL_HEADER;
+	}
+
+	AVDictionary *options = NULL;
+	vector<pair<string, string>> opts = MUX_OPTS;
+	for (pair<string, string> opt : opts) {
+		av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
+	}
+	if (avformat_write_header(avctx, &options) < 0) {
+		fprintf(stderr, "avformat_write_header() failed\n");
+		exit(1);
+	}
+}
+
+Mux::~Mux()
+{
+	av_write_trailer(avctx);
+	av_free(avctx->pb->buffer);
+	av_free(avctx->pb);
+	avformat_free_context(avctx);
+}
+
+void Mux::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
+{
+	if (!seen_keyframe && !(pkt.stream_index == 0 && (pkt.flags & AV_PKT_FLAG_KEY))) {
+		// Wait until we see the first (video) key frame.
+		return;
+	}
+	seen_keyframe = true;
+
+	AVPacket pkt_copy;
+	av_copy_packet(&pkt_copy, &pkt);
+	if (pkt.stream_index == 0) {
+		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+	} else if (pkt.stream_index == 1) {
+		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+	} else {
+		assert(false);
+	}
+
+	if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
+		fprintf(stderr, "av_interleaved_write_frame() failed\n");
+		exit(1);
+	}
+
+	av_packet_unref(&pkt_copy);
+}
+
diff --git a/mux.h b/mux.h
new file mode 100644
index 0000000..2aefffc
--- /dev/null
+++ b/mux.h
@@ -0,0 +1,29 @@
+#ifndef _MUX_H
+#define _MUX_H 1
+
+// Wrapper around an AVFormat mux.
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavformat/avio.h>
+}
+
+class Mux {
+public:
+	enum Codec {
+		CODEC_H264,
+		CODEC_NV12, // Uncompressed 4:2:0.
+	};
+
+	Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base); // Takes ownership of avctx.
+	~Mux();
+	void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
+
+private:
+	bool seen_keyframe = false;
+	AVFormatContext *avctx;
+	AVStream *avstream_video, *avstream_audio;
+};
+
+#endif // !defined(_MUX_H)
-- 
2.39.2
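
Usage note (not part of the patch): the sketch below shows one way a caller might drive the newly extracted Mux class. It is an illustration under stated assumptions, not project code: the "mp4" container, 1280x720 resolution, 64 kB I/O buffer and stdout write callback are placeholders; in Nageru itself, HTTPD supplies its own AVIOContext write callback and feeds packets coming out of the H.264 encoder.

// usage_sketch.cpp: hypothetical, minimal driver for the extracted Mux class.
// The container, resolution, buffer size and write callback are assumptions.
#include <cstdint>
#include <cstdio>

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/mem.h>
}

#include "mux.h"
#include "timebase.h"  // TIMEBASE: the tick rate add_packet() expects pts/dts in.

namespace {

// Write callback for the custom AVIOContext. A real caller (e.g. HTTPD) would
// forward these muxed bytes to connected clients or a file instead of stdout.
int write_packet(void *opaque, uint8_t *buf, int buf_size)
{
	fwrite(buf, 1, buf_size, stdout);
	return buf_size;
}

}  // namespace

int main()
{
	av_register_all();  // still required in FFmpeg versions of this era

	AVFormatContext *avctx = avformat_alloc_context();
	avctx->oformat = av_guess_format("mp4", nullptr, nullptr);  // assumed container

	const int buf_size = 65536;  // arbitrary I/O buffer size
	uint8_t *buf = (uint8_t *)av_malloc(buf_size);
	avctx->pb = avio_alloc_context(buf, buf_size, /*write_flag=*/1, nullptr,
	                               nullptr, write_packet, nullptr);

	{
		// Mux takes ownership of avctx; its destructor writes the trailer and
		// frees pb, pb->buffer and the format context itself.
		Mux mux(avctx, 1280, 720, Mux::CODEC_H264, TIMEBASE);

		// Packets would normally come from the encoder. pts/dts are given in
		// 1/TIMEBASE units and rescaled to the stream time base by add_packet().
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = nullptr;          // an encoded H.264 access unit would go here
		pkt.size = 0;
		pkt.stream_index = 0;        // 0 = video, 1 = audio
		pkt.flags = AV_PKT_FLAG_KEY; // everything before the first video keyframe is dropped
		mux.add_packet(pkt, /*pts=*/0, /*dts=*/0);
	}

	return 0;
}

Keeping pts/dts in one global TIMEBASE and rescaling per stream inside add_packet() is what lets the same Mux later sit behind both the HTTP stream and a file mux, which is the decoupling this patch is the first step towards.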