From 4ec330853ee13b230f089253fedd20b49229e42b Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Sun, 25 Oct 2015 20:25:07 +0100
Subject: [PATCH] Unify muxing between the local file and networking.

---
 h264encode.cpp | 68 ++++++++------------------------------
 h264encode.h   |  6 ++--
 httpd.cpp      | 88 ++++++++++++++++++++++++++++++++------------------
 httpd.h        | 33 +++++++++++++++----
 mixer.cpp      |  5 +--
 5 files changed, 100 insertions(+), 100 deletions(-)

diff --git a/h264encode.cpp b/h264encode.cpp
index 3880c18..55ed9a1 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -1656,8 +1656,6 @@ int H264Encoder::save_codeddata(storage_task task)
 	AVPacket pkt;
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.buf = nullptr;
-	pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
-	pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
 	pkt.data = reinterpret_cast(&data[0]);
 	pkt.size = data.size();
 	pkt.stream_index = 0;
@@ -1667,8 +1665,7 @@
 			pkt.flags = 0;
 		}
 		//pkt.duration = 1;
-		httpd->add_packet(pkt);
-		av_interleaved_write_frame(avctx, &pkt);
+		httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
 	}
 	// Encode and add all audio frames up to and including the pts of this video frame.
 	// (They can never be queued to us after the video frame they belong to, only before.)
@@ -1684,6 +1681,7 @@
 		audio = move(it->second);
 		pending_audio_frames.erase(it);
 	}
+
 	AVFrame *frame = avcodec_alloc_frame();
 	frame->nb_samples = audio.size() / 2;
 	frame->format = AV_SAMPLE_FMT_FLT;
@@ -1701,13 +1699,10 @@
 	pkt.data = nullptr;
 	pkt.size = 0;
 	int got_output;
-	avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
+	avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
 	if (got_output) {
-		pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-		pkt.dts = pkt.pts;
 		pkt.stream_index = 1;
-		httpd->add_packet(pkt);
-		av_interleaved_write_frame(avctx, &pkt);
+		httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
 	}
 	// TODO: Delayed frames.
 	avcodec_free_frame(&frame);
@@ -1821,56 +1816,22 @@ static int print_input()
 
 	return 0;
 }
-
-//H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename)
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd)
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
 	: current_storage_frame(0), surface(surface), httpd(httpd)
-	//: width(width), height(height), current_encoding_frame(0)
 {
-	avctx = avformat_alloc_context();
-	avctx->oformat = av_guess_format(NULL, output_filename, NULL);
-	strcpy(avctx->filename, output_filename);
-	if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
-		fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
-		exit(1);
-	}
-	AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
-	avstream_video = avformat_new_stream(avctx, codec_video);
-	if (avstream_video == nullptr) {
-		fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
-		exit(1);
-	}
-	avstream_video->time_base = AVRational{1, TIMEBASE};
-	avstream_video->codec->width = width;
-	avstream_video->codec->height = height;
-	avstream_video->codec->time_base = AVRational{1, TIMEBASE};
-	avstream_video->codec->ticks_per_frame = 1; // or 2?
-
 	AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
-	avstream_audio = avformat_new_stream(avctx, codec_audio);
-	if (avstream_audio == nullptr) {
-		fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
-		exit(1);
-	}
-	avstream_audio->time_base = AVRational{1, TIMEBASE};
-	avstream_audio->codec->bit_rate = 256000;
-	avstream_audio->codec->sample_rate = 48000;
-	avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
-	avstream_audio->codec->channels = 2;
-	avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
-	avstream_audio->codec->time_base = AVRational{1, TIMEBASE};
-
-	/* open it */
-	if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
+	context_audio = avcodec_alloc_context3(codec_audio);
+	context_audio->bit_rate = 256000;
+	context_audio->sample_rate = 48000;
+	context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+	context_audio->channels = 2;
+	context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
+	context_audio->time_base = AVRational{1, TIMEBASE};
+	if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
 		fprintf(stderr, "Could not open codec\n");
 		exit(1);
 	}
 
-	if (avformat_write_header(avctx, NULL) < 0) {
-		fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
-		exit(1);
-	}
-
 	frame_width = width;
 	frame_height = height;
 	frame_width_mbaligned = (frame_width + 15) & (~15);
@@ -1922,9 +1883,6 @@ H264Encoder::~H264Encoder()
 
 	release_encode();
 	deinit_va();
-
-	av_write_trailer(avctx);
-	avformat_free_context(avctx);
 }
 
 bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
diff --git a/h264encode.h b/h264encode.h
index 48dd329..7a10d92 100644
--- a/h264encode.h
+++ b/h264encode.h
@@ -52,7 +52,7 @@ class QSurface;
 
 class H264Encoder {
 public:
-	H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd);
+	H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd);
 	~H264Encoder();
 	//void add_frame(FrameAllocator::Frame frame, GLsync fence);
@@ -109,9 +109,7 @@ private:
 	std::map> pending_audio_frames; // under frame_queue_mutex
 	QSurface *surface;
 
-	AVFormatContext *avctx;
-	AVStream *avstream_video;
-	AVStream *avstream_audio;
+	AVCodecContext *context_audio;
 
 	HTTPD *httpd;
 };
diff --git a/httpd.cpp b/httpd.cpp
index 3485a83..38e685e 100644
--- a/httpd.cpp
+++ b/httpd.cpp
@@ -11,7 +11,19 @@ extern "C" {
 
 using namespace std;
 
-HTTPD::HTTPD() {}
+HTTPD::HTTPD(const char *output_filename, int width, int height)
+	: width(width), height(height)
+{
+	AVFormatContext *avctx = avformat_alloc_context();
+	avctx->oformat = av_guess_format(NULL, output_filename, NULL);
+	strcpy(avctx->filename, output_filename);
+	if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
+		fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
+		exit(1);
+	}
+
+	file_mux.reset(new Mux(avctx, width, height));
+}
 
 void HTTPD::start(int port)
 {
@@ -21,11 +33,12 @@
 		&answer_to_connection_thunk, this, MHD_OPTION_END);
 }
 
-void HTTPD::add_packet(const AVPacket &pkt)
+void HTTPD::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
 {
 	for (Stream *stream : streams) {
-		stream->add_packet(pkt);
+		stream->add_packet(pkt, pts, dts);
 	}
+	file_mux->add_packet(pkt, pts, dts);
 }
 
 int HTTPD::answer_to_connection_thunk(void *cls, MHD_Connection *connection,
@@ -45,7 +58,7 @@ int HTTPD::answer_to_connection(MHD_Connection *connection,
 	printf("url %s\n", url);
 	AVOutputFormat *oformat = av_guess_format("mpegts", nullptr, nullptr);
 	assert(oformat != nullptr);
-	HTTPD::Stream *stream = new HTTPD::Stream(oformat);
+	HTTPD::Stream *stream = new HTTPD::Stream(oformat, width, height);
 	streams.push_back(stream);
 	MHD_Response *response = MHD_create_response_from_callback(
 		(size_t)-1, 1048576, &HTTPD::Stream::reader_callback_thunk, stream, &HTTPD::free_stream);
@@ -61,15 +74,9 @@ void HTTPD::free_stream(void *cls)
 	delete stream;
 }
 
-HTTPD::Stream::Stream(AVOutputFormat *oformat)
+HTTPD::Mux::Mux(AVFormatContext *avctx, int width, int height)
+	: avctx(avctx)
 {
-	avctx = avformat_alloc_context();
-	avctx->oformat = oformat;
-	uint8_t *buf = (uint8_t *)av_malloc(1048576);
-	avctx->pb = avio_alloc_context(buf, 1048576, 1, this, nullptr, &HTTPD::Stream::write_packet_thunk, nullptr);
-	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
-	// TODO: Unify with the code in h264encoder.cpp.
 	AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
 	avstream_video = avformat_new_stream(avctx, codec_video);
 	if (avstream_video == nullptr) {
@@ -77,8 +84,8 @@ HTTPD::Stream::Stream(AVOutputFormat *oformat)
 		exit(1);
 	}
 	avstream_video->time_base = AVRational{1, TIMEBASE};
-	avstream_video->codec->width = 1280; // FIXME
-	avstream_video->codec->height = 720; // FIXME
+	avstream_video->codec->width = width;
+	avstream_video->codec->height = height;
 	avstream_video->codec->time_base = AVRational{1, TIMEBASE};
 	avstream_video->codec->ticks_per_frame = 1; // or 2?
@@ -102,11 +109,43 @@ HTTPD::Stream::Stream(AVOutputFormat *oformat)
 	}
 }
 
-HTTPD::Stream::~Stream()
+HTTPD::Mux::~Mux()
 {
+	av_write_trailer(avctx);
 	avformat_free_context(avctx);
 }
 
+void HTTPD::Mux::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
+{
+	AVPacket pkt_copy;
+	av_copy_packet(&pkt_copy, &pkt);
+	if (pkt.stream_index == 0) {
+		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+	} else if (pkt.stream_index == 1) {
+		pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+		pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+	} else {
+		assert(false);
+	}
+
+	if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
+		fprintf(stderr, "av_interleaved_write_frame() failed\n");
+		exit(1);
+	}
+}
+
+HTTPD::Stream::Stream(AVOutputFormat *oformat, int width, int height)
+{
+	AVFormatContext *avctx = avformat_alloc_context();
+	avctx->oformat = oformat;
+	uint8_t *buf = (uint8_t *)av_malloc(1048576);
+	avctx->pb = avio_alloc_context(buf, 1048576, 1, this, nullptr, &HTTPD::Stream::write_packet_thunk, nullptr);
+	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+	mux.reset(new Mux(avctx, width, height));
+}
+
 ssize_t HTTPD::Stream::reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max)
 {
 	HTTPD::Stream *stream = (HTTPD::Stream *)cls;
@@ -142,24 +181,9 @@ ssize_t HTTPD::Stream::reader_callback(uint64_t pos, char *buf, size_t max)
 	return ret;
 }
 
-void HTTPD::Stream::add_packet(const AVPacket &pkt)
+void HTTPD::Stream::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
 {
-	AVPacket pkt_copy;
-	av_copy_packet(&pkt_copy, &pkt);
-	if (pkt.stream_index == 0) {
-		pkt_copy.pts = av_rescale_q(pkt.pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-		pkt_copy.dts = av_rescale_q(pkt.dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-	} else if (pkt.stream_index == 1) {
-		pkt_copy.pts = av_rescale_q(pkt.pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-		pkt_copy.dts = av_rescale_q(pkt.dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-	} else {
-		assert(false);
-	}
-
-	if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
-		fprintf(stderr, "av_interleaved_write_frame() failed\n");
-		exit(1);
-	}
+	mux->add_packet(pkt, pts, dts);
 }
 
 int HTTPD::Stream::write_packet_thunk(void *opaque, uint8_t *buf, int buf_size)
diff --git a/httpd.h b/httpd.h
index 0a092aa..524c8d6 100644
--- a/httpd.h
+++ b/httpd.h
@@ -1,6 +1,13 @@
 #ifndef _HTTPD_H
 #define _HTTPD_H
 
+// A class dealing with stream output, both to HTTP (thus the class name)
+// and to local output files. Since we generally have very few outputs
+// (end clients are not meant to connect directly to our stream; it should be
+// transcoded by something else and then sent to a reflector), we don't need to
+// care a lot about performance. Thus, we solve this by the simplest possible
+// way, namely having one ffmpeg mux per output.
+
 #include 
 #include 
 #include 
@@ -14,9 +21,9 @@ extern "C" {
 
 class HTTPD {
 public:
-	HTTPD();
+	HTTPD(const char *output_filename, int width, int height);
 	void start(int port);
-	void add_packet(const AVPacket &pkt);
+	void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
 
 private:
 	static int answer_to_connection_thunk(void *cls, MHD_Connection *connection,
@@ -31,23 +38,32 @@
 
 	static void free_stream(void *cls);
 
+	class Mux {
+	public:
+		Mux(AVFormatContext *avctx, int width, int height); // Takes ownership of avctx.
+		~Mux();
+		void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
+
+	private:
+		AVFormatContext *avctx;
+		AVStream *avstream_video, *avstream_audio;
+	};
+
 	class Stream {
 	public:
-		Stream(AVOutputFormat *oformat);
-		~Stream();
+		Stream(AVOutputFormat *oformat, int width, int height);
 		static ssize_t reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max);
 		ssize_t reader_callback(uint64_t pos, char *buf, size_t max);
-		void add_packet(const AVPacket &pkt);
+		void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
 
 	private:
 		static int write_packet_thunk(void *opaque, uint8_t *buf, int buf_size);
 		int write_packet(uint8_t *buf, int buf_size);
 
 		AVIOContext *avio;
-		AVFormatContext *avctx;
-		AVStream *avstream_video, *avstream_audio;
+		std::unique_ptr<Mux> mux;
 
 		std::mutex buffer_mutex;
 		std::condition_variable has_buffered_data;
@@ -56,6 +72,9 @@ private:
 	};
 
 	std::vector<Stream *> streams; // Not owned.
+
+	int width, height;
+	std::unique_ptr<Mux> file_mux; // To local disk.
 };
 
 #endif // !defined(_HTTPD_H)
diff --git a/mixer.cpp b/mixer.cpp
index 28cbed7..59f520a 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -71,7 +71,8 @@ void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src
 } // namespace
 
 Mixer::Mixer(const QSurfaceFormat &format)
-	: mixer_surface(create_surface(format)),
+	: httpd("test.ts", WIDTH, HEIGHT),
+	  mixer_surface(create_surface(format)),
 	  h264_encoder_surface(create_surface(format))
 {
 	httpd.start(9095);
@@ -99,7 +100,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
 	display_chain->set_dither_bits(0); // Don't bother.
 	display_chain->finalize();
 
-	h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, "test.ts", &httpd));
+	h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));
 
 	for (int card_index = 0; card_index < NUM_CARDS; ++card_index) {
 		printf("Configuring card %d...\n", card_index);
-- 
2.39.2
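A quick usage note, not part of the patch itself: after this change only HTTPD knows about outputs, and the encoder just forwards timestamped packets. The sketch below shows the resulting wiring, following the mixer.cpp hunk above. WIDTH and HEIGHT stand in for the real project constants, "test.ts" is the same placeholder filename the patch uses, and error handling plus the rest of Mixer are omitted.

// Sketch only (assumed caller-side setup, not part of the patch).
#include "h264encode.h"
#include "httpd.h"

constexpr int WIDTH = 1280;  // placeholder value
constexpr int HEIGHT = 720;  // placeholder value

void example_wiring(QSurface *encoder_surface)
{
	// One HTTPD instance owns every mux: the local file ("test.ts") plus
	// one MPEG-TS mux per connected HTTP client.
	HTTPD httpd("test.ts", WIDTH, HEIGHT);
	httpd.start(9095);

	// The encoder no longer opens any output itself; it hands each finished
	// packet to HTTPD::add_packet() with pts/dts still in 1/TIMEBASE units,
	// and each Mux rescales them to its own stream time_base.
	H264Encoder h264_encoder(encoder_surface, WIDTH, HEIGHT, &httpd);

	// Inside the encoder, delivery now looks like this (see save_codeddata()):
	//   httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
}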