AVPacket pkt;
memset(&pkt, 0, sizeof(pkt));
pkt.buf = nullptr;
- pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
- pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
pkt.size = data.size();
pkt.stream_index = 0;
pkt.flags = 0;
}
//pkt.duration = 1;
- httpd->add_packet(pkt);
- av_interleaved_write_frame(avctx, &pkt);
+ httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
}
// Encode and add all audio frames up to and including the pts of this video frame.
// (They can never be queued to us after the video frame they belong to, only before.)
audio = move(it->second);
pending_audio_frames.erase(it);
}
+
AVFrame *frame = avcodec_alloc_frame();
frame->nb_samples = audio.size() / 2;
frame->format = AV_SAMPLE_FMT_FLT;
pkt.data = nullptr;
pkt.size = 0;
int got_output;
- avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
+ avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
if (got_output) {
- pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
- pkt.dts = pkt.pts;
pkt.stream_index = 1;
- httpd->add_packet(pkt);
- av_interleaved_write_frame(avctx, &pkt);
+ httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
}
// TODO: Delayed frames.
avcodec_free_frame(&frame);
return 0;
}
-
-//H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename)
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd)
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
: current_storage_frame(0), surface(surface), httpd(httpd)
- //: width(width), height(height), current_encoding_frame(0)
{
- avctx = avformat_alloc_context();
- avctx->oformat = av_guess_format(NULL, output_filename, NULL);
- strcpy(avctx->filename, output_filename);
- if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
- fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
- exit(1);
- }
- AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
- avstream_video = avformat_new_stream(avctx, codec_video);
- if (avstream_video == nullptr) {
- fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
- exit(1);
- }
- avstream_video->time_base = AVRational{1, TIMEBASE};
- avstream_video->codec->width = width;
- avstream_video->codec->height = height;
- avstream_video->codec->time_base = AVRational{1, TIMEBASE};
- avstream_video->codec->ticks_per_frame = 1; // or 2?
-
AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
- avstream_audio = avformat_new_stream(avctx, codec_audio);
- if (avstream_audio == nullptr) {
- fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
- exit(1);
- }
- avstream_audio->time_base = AVRational{1, TIMEBASE};
- avstream_audio->codec->bit_rate = 256000;
- avstream_audio->codec->sample_rate = 48000;
- avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
- avstream_audio->codec->channels = 2;
- avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
- avstream_audio->codec->time_base = AVRational{1, TIMEBASE};
-
- /* open it */
- if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
+ context_audio = avcodec_alloc_context3(codec_audio);
+ context_audio->bit_rate = 256000;
+ context_audio->sample_rate = 48000;
+ context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ context_audio->channels = 2;
+ context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
+ context_audio->time_base = AVRational{1, TIMEBASE};
+ if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
- if (avformat_write_header(avctx, NULL) < 0) {
- fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
- exit(1);
- }
-
frame_width = width;
frame_height = height;
frame_width_mbaligned = (frame_width + 15) & (~15);
release_encode();
deinit_va();
-
- av_write_trailer(avctx);
- avformat_free_context(avctx);
}
bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
class H264Encoder {
public:
- H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd);
+ H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd);
~H264Encoder();
//void add_frame(FrameAllocator::Frame frame, GLsync fence);
std::map<int64_t, std::vector<float>> pending_audio_frames; // under frame_queue_mutex
QSurface *surface;
- AVFormatContext *avctx;
- AVStream *avstream_video;
- AVStream *avstream_audio;
+ AVCodecContext *context_audio;
HTTPD *httpd;
};
using namespace std;
-HTTPD::HTTPD() {}
+// Open the local dump file named by <output_filename> and wrap the resulting
+// AVFormatContext in a Mux (which takes ownership of it). <width>/<height>
+// are remembered so per-client HTTP streams can be set up with the same
+// video dimensions later.
+HTTPD::HTTPD(const char *output_filename, int width, int height)
+ : width(width), height(height)
+{
+ AVFormatContext *avctx = avformat_alloc_context();
+ avctx->oformat = av_guess_format(NULL, output_filename, NULL);
+ // AVFormatContext::filename is a fixed-size buffer; use a bounded copy
+ // instead of strcpy() so an overlong filename cannot overflow it.
+ av_strlcpy(avctx->filename, output_filename, sizeof(avctx->filename));
+ if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
+ fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
+ exit(1);
+ }
+
+ file_mux.reset(new Mux(avctx, width, height));
+}
void HTTPD::start(int port)
{
&answer_to_connection_thunk, this, MHD_OPTION_END);
}
-void HTTPD::add_packet(const AVPacket &pkt)
+// Fan a finished packet out to every connected HTTP client stream and then
+// to the local file mux. <pts>/<dts> are in units of 1/TIMEBASE; each Mux
+// rescales them to its own stream time base before writing.
+void HTTPD::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
{
for (Stream *stream : streams) {
-		stream->add_packet(pkt);
+		stream->add_packet(pkt, pts, dts);
}
+ file_mux->add_packet(pkt, pts, dts);
}
int HTTPD::answer_to_connection_thunk(void *cls, MHD_Connection *connection,
printf("url %s\n", url);
AVOutputFormat *oformat = av_guess_format("mpegts", nullptr, nullptr);
assert(oformat != nullptr);
- HTTPD::Stream *stream = new HTTPD::Stream(oformat);
+ HTTPD::Stream *stream = new HTTPD::Stream(oformat, width, height);
streams.push_back(stream);
MHD_Response *response = MHD_create_response_from_callback(
(size_t)-1, 1048576, &HTTPD::Stream::reader_callback_thunk, stream, &HTTPD::free_stream);
delete stream;
}
-HTTPD::Stream::Stream(AVOutputFormat *oformat)
+HTTPD::Mux::Mux(AVFormatContext *avctx, int width, int height)
+ : avctx(avctx)
{
- avctx = avformat_alloc_context();
- avctx->oformat = oformat;
- uint8_t *buf = (uint8_t *)av_malloc(1048576);
- avctx->pb = avio_alloc_context(buf, 1048576, 1, this, nullptr, &HTTPD::Stream::write_packet_thunk, nullptr);
- avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
- // TODO: Unify with the code in h264encoder.cpp.
AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
avstream_video = avformat_new_stream(avctx, codec_video);
if (avstream_video == nullptr) {
exit(1);
}
avstream_video->time_base = AVRational{1, TIMEBASE};
- avstream_video->codec->width = 1280; // FIXME
- avstream_video->codec->height = 720; // FIXME
+ avstream_video->codec->width = width;
+ avstream_video->codec->height = height;
avstream_video->codec->time_base = AVRational{1, TIMEBASE};
avstream_video->codec->ticks_per_frame = 1; // or 2?
}
}
-HTTPD::Stream::~Stream()
+// Finalize the container (write any buffered/trailing data) and free the
+// AVFormatContext that the constructor took ownership of.
+// NOTE(review): avctx->pb (the AVIOContext) is not closed or freed here —
+// confirm whether the caller or avformat_free_context() is expected to
+// release it.
+HTTPD::Mux::~Mux()
{
+ av_write_trailer(avctx);
avformat_free_context(avctx);
}
+// Write one packet to this mux. <pts>/<dts> are in 1/TIMEBASE units and are
+// rescaled to the time base of the destination stream (index 0 = video,
+// index 1 = audio; anything else is a programming error). The packet is
+// copied first so the caller's AVPacket is left untouched.
+void HTTPD::Mux::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
+{
+ AVPacket pkt_copy;
+ av_copy_packet(&pkt_copy, &pkt);
+ if (pkt.stream_index == 0) {
+ pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+ pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
+ } else if (pkt.stream_index == 1) {
+ pkt_copy.pts = av_rescale_q(pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+ pkt_copy.dts = av_rescale_q(dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
+ } else {
+ assert(false);
+ }
+
+ // NOTE(review): assumes av_interleaved_write_frame() takes ownership of
+ // pkt_copy's data (true for this FFmpeg API generation) — confirm, else
+ // this leaks the copy.
+ if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
+ fprintf(stderr, "av_interleaved_write_frame() failed\n");
+ exit(1);
+ }
+}
+
+// Set up one per-client output stream: an AVFormatContext with custom AVIO
+// whose writes land in write_packet_thunk() (which buffers data for the
+// microhttpd reader callback), wrapped in a Mux that takes ownership.
+HTTPD::Stream::Stream(AVOutputFormat *oformat, int width, int height)
+{
+ AVFormatContext *avctx = avformat_alloc_context();
+ avctx->oformat = oformat;
+ uint8_t *buf = (uint8_t *)av_malloc(1048576);
+ avctx->pb = avio_alloc_context(buf, 1048576, 1, this, nullptr, &HTTPD::Stream::write_packet_thunk, nullptr);
+ // NOTE(review): plain assignment clobbers any default flags; confirm
+ // whether this should be |= AVFMT_FLAG_CUSTOM_IO instead.
+ avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+ mux.reset(new Mux(avctx, width, height));
+}
+
ssize_t HTTPD::Stream::reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max)
{
HTTPD::Stream *stream = (HTTPD::Stream *)cls;
return ret;
}
-void HTTPD::Stream::add_packet(const AVPacket &pkt)
+// Forward one packet (pts/dts in 1/TIMEBASE units) to this stream's Mux;
+// all rescaling and writing now lives in Mux::add_packet().
+void HTTPD::Stream::add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)
{
-	AVPacket pkt_copy;
-	av_copy_packet(&pkt_copy, &pkt);
-	if (pkt.stream_index == 0) {
-		pkt_copy.pts = av_rescale_q(pkt.pts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-		pkt_copy.dts = av_rescale_q(pkt.dts, AVRational{1, TIMEBASE}, avstream_video->time_base);
-	} else if (pkt.stream_index == 1) {
-		pkt_copy.pts = av_rescale_q(pkt.pts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-		pkt_copy.dts = av_rescale_q(pkt.dts, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-	} else {
-		assert(false);
-	}
-
-	if (av_interleaved_write_frame(avctx, &pkt_copy) < 0) {
-		fprintf(stderr, "av_interleaved_write_frame() failed\n");
-		exit(1);
-	}
+ mux->add_packet(pkt, pts, dts);
}
int HTTPD::Stream::write_packet_thunk(void *opaque, uint8_t *buf, int buf_size)
#ifndef _HTTPD_H
#define _HTTPD_H
+// A class dealing with stream output, both to HTTP (thus the class name)
+// and to local output files. Since we generally have very few outputs
+// (end clients are not meant to connect directly to our stream; it should be
+// transcoded by something else and then sent to a reflector), we don't need to
+// care a lot about performance. Thus, we solve this by the simplest possible
+// way, namely having one ffmpeg mux per output.
+
#include <microhttpd.h>
#include <deque>
#include <string>
class HTTPD {
public:
- HTTPD();
+ HTTPD(const char *output_filename, int width, int height);
void start(int port);
- void add_packet(const AVPacket &pkt);
+ void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
private:
static int answer_to_connection_thunk(void *cls, MHD_Connection *connection,
static void free_stream(void *cls);
+ // One ffmpeg muxer instance: owns an AVFormatContext (and its video and
+ // audio AVStreams) and writes rescaled packets into it. Used both for the
+ // local file dump and for each connected HTTP client.
+ class Mux {
+ public:
+ Mux(AVFormatContext *avctx, int width, int height); // Takes ownership of avctx.
+ ~Mux();
+ // pts/dts are in 1/TIMEBASE units; rescaled per stream before writing.
+ void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
+
+ private:
+ AVFormatContext *avctx;
+ AVStream *avstream_video, *avstream_audio;
+ };
+
class Stream {
public:
- Stream(AVOutputFormat *oformat);
- ~Stream();
+ Stream(AVOutputFormat *oformat, int width, int height);
static ssize_t reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max);
ssize_t reader_callback(uint64_t pos, char *buf, size_t max);
- void add_packet(const AVPacket &pkt);
+ void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
private:
static int write_packet_thunk(void *opaque, uint8_t *buf, int buf_size);
int write_packet(uint8_t *buf, int buf_size);
AVIOContext *avio;
- AVFormatContext *avctx;
- AVStream *avstream_video, *avstream_audio;
+ std::unique_ptr<Mux> mux;
std::mutex buffer_mutex;
std::condition_variable has_buffered_data;
};
std::vector<Stream *> streams; // Not owned.
+
+ int width, height;
+ std::unique_ptr<Mux> file_mux; // To local disk.
};
#endif // !defined(_HTTPD_H)
} // namespace
Mixer::Mixer(const QSurfaceFormat &format)
- : mixer_surface(create_surface(format)),
+ : httpd("test.ts", WIDTH, HEIGHT),
+ mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format))
{
httpd.start(9095);
display_chain->set_dither_bits(0); // Don't bother.
display_chain->finalize();
- h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, "test.ts", &httpd));
+ h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));
for (int card_index = 0; card_index < NUM_CARDS; ++card_index) {
printf("Configuring card %d...\n", card_index);