fprintf(stderr, " ($DISPLAY spec or /dev/dri/render* path)\n");
fprintf(stderr, " --http-uncompressed-video send uncompressed NV12 video to HTTP clients\n");
fprintf(stderr, " --http-mux=NAME mux to use for HTTP streams (default " DEFAULT_STREAM_MUX_NAME ")\n");
+ fprintf(stderr, " --http-audio-codec=NAME audio codec to use for HTTP streams\n");
+ fprintf(stderr, " (default is to use the same as for the recording)\n");
+ fprintf(stderr, " --http-audio-bitrate=KBITS audio codec bit rate to use for HTTP streams\n");
+ fprintf(stderr, " (default is %d, ignored unless --http-audio-codec is set)\n",
+ DEFAULT_AUDIO_OUTPUT_BIT_RATE / 1000);
fprintf(stderr, " --http-coarse-timebase use less timebase for HTTP (recommended for muxers\n");
fprintf(stderr, " that handle large pts poorly, like e.g. MP4)\n");
fprintf(stderr, " --flat-audio start with most audio processing turned off\n");
{ "http-uncompressed-video", no_argument, 0, 1001 },
{ "http-mux", required_argument, 0, 1004 },
{ "http-coarse-timebase", no_argument, 0, 1005 },
+ { "http-audio-codec", required_argument, 0, 1006 },
+ { "http-audio-bitrate", required_argument, 0, 1007 },
{ "flat-audio", no_argument, 0, 1002 },
{ "no-flush-pbos", no_argument, 0, 1003 },
{ 0, 0, 0, 0 }
case 1005:
global_flags.stream_coarse_timebase = true;
break;
+ // New: pick a separate audio codec for the HTTP stream (blank = reuse the recording codec).
+ case 1006:
+ global_flags.stream_audio_codec_name = optarg;
+ break;
+ // New: stream audio bit rate; given in kbit/s on the command line, stored in bit/s.
+ // NOTE(review): atoi() silently yields 0 on malformed input — consider strtol() with
+ // an error check so "--http-audio-bitrate=abc" is rejected instead of meaning 0 bit/s.
+ case 1007:
+ global_flags.stream_audio_codec_bitrate = atoi(optarg) * 1000;
+ break;
case 1002:
global_flags.flat_audio = true;
break;
bool flush_pbos = true;
std::string stream_mux_name = DEFAULT_STREAM_MUX_NAME;
bool stream_coarse_timebase = false;
+ std::string stream_audio_codec_name; // Blank = use the same as for the recording.
+ int stream_audio_codec_bitrate = DEFAULT_AUDIO_OUTPUT_BIT_RATE; // Ignored if stream_audio_codec_name is blank.
};
extern Flags global_flags;
void encode_audio(const vector<float> &audio,
vector<float> *audio_queue,
int64_t audio_pts,
- AVCodecContext *ctx);
+ AVCodecContext *ctx,
+ const vector<PacketDestination *> &destinations);
void encode_audio_one_frame(const float *audio,
size_t num_samples, // In each channel.
int64_t audio_pts,
- AVCodecContext *ctx);
+ AVCodecContext *ctx,
+ const vector<PacketDestination *> &destinations);
void storage_task_enqueue(storage_task task);
void save_codeddata(storage_task task);
int render_packedsequence();
map<int64_t, vector<float>> pending_audio_frames; // under frame_queue_mutex
QSurface *surface;
- AVCodecContext *context_audio;
- vector<float> audio_queue;
+ AVCodecContext *context_audio_file;
+ AVCodecContext *context_audio_stream = nullptr; // nullptr = don't code separate audio for stream.
+
+ vector<float> audio_queue_file;
+ vector<float> audio_queue_stream;
AVFrame *audio_frame = nullptr;
HTTPD *httpd;
pending_audio_frames.erase(it);
}
- encode_audio(audio, &audio_queue, audio_pts, context_audio);
+ // If a separate stream audio codec is configured, encode twice: once for the
+ // file mux, once for the HTTP stream. Otherwise one encode feeds both sinks.
+ // NOTE(review): the replaced code guarded file_mux with `if (file_mux)` before
+ // adding packets; here file_mux.get() goes into the destination list
+ // unconditionally — confirm file_mux can never be null at this point, or
+ // null-check before putting it in the list (add_packet would deref null).
+ if (context_audio_stream) {
+ encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { file_mux.get() });
+ encode_audio(audio, &audio_queue_stream, audio_pts, context_audio_stream, { httpd });
+ } else {
+ encode_audio(audio, &audio_queue_file, audio_pts, context_audio_file, { httpd, file_mux.get() });
+ }
if (audio_pts == task.pts) break;
}
const vector<float> &audio,
vector<float> *audio_queue,
int64_t audio_pts,
- AVCodecContext *ctx)
+ AVCodecContext *ctx,
+ const vector<PacketDestination *> &destinations)
{
if (ctx->frame_size == 0) {
// No queueing needed.
assert(audio_queue->empty());
assert(audio.size() % 2 == 0);
- encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx);
+ encode_audio_one_frame(&audio[0], audio.size() / 2, audio_pts, ctx, destinations);
return;
}
encode_audio_one_frame(&(*audio_queue)[sample_num],
ctx->frame_size,
audio_pts,
- ctx);
+ ctx,
+ destinations);
}
audio_queue->erase(audio_queue->begin(), audio_queue->begin() + sample_num);
}
const float *audio,
size_t num_samples,
int64_t audio_pts,
- AVCodecContext *ctx)
+ AVCodecContext *ctx,
+ const vector<PacketDestination *> &destinations)
{
audio_frame->nb_samples = num_samples;
audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
if (got_output) {
pkt.stream_index = 1;
pkt.flags = AV_PKT_FLAG_KEY;
- if (file_mux) {
- file_mux->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
+ // Fan the encoded audio packet out to every destination (file mux and/or HTTP).
+ // NOTE(review): this drops the old `if (file_mux)` null guard — every pointer
+ // in `destinations` must now be non-null; verify at the call sites.
+ for (PacketDestination *dest : destinations) {
+ dest->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
}
- httpd->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay());
}
// TODO: Delayed frames.
av_frame_unref(audio_frame);
H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
: current_storage_frame(0), surface(surface), httpd(httpd)
{
- init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, AUDIO_OUTPUT_BIT_RATE, &context_audio);
+ init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, &context_audio_file);
+
+ if (!global_flags.stream_audio_codec_name.empty()) {
+ init_audio_encoder(global_flags.stream_audio_codec_name,
+ global_flags.stream_audio_codec_bitrate, &context_audio_stream);
+ }
audio_frame = av_frame_alloc();
exit(1);
}
- file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, TIMEBASE));
+ file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
}
void H264EncoderImpl::close_output_file()
AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
assert(oformat != nullptr);
+ // TODO: This is an ugly place to have this logic.
+ const int bit_rate = global_flags.stream_audio_codec_name.empty() ?
+ DEFAULT_AUDIO_OUTPUT_BIT_RATE :
+ global_flags.stream_audio_codec_bitrate;
+
int time_base = global_flags.stream_coarse_timebase ? COARSE_TIMEBASE : TIMEBASE;
- HTTPD::Stream *stream = new HTTPD::Stream(oformat, width, height, time_base);
+ HTTPD::Stream *stream = new HTTPD::Stream(oformat, width, height, time_base, bit_rate);
{
unique_lock<mutex> lock(streams_mutex);
streams.insert(stream);
}
}
-HTTPD::Stream::Stream(AVOutputFormat *oformat, int width, int height, int time_base)
+HTTPD::Stream::Stream(AVOutputFormat *oformat, int width, int height, int time_base, int bit_rate)
{
AVFormatContext *avctx = avformat_alloc_context();
avctx->oformat = oformat;
avctx->flags = AVFMT_FLAG_CUSTOM_IO;
- mux.reset(new Mux(avctx, width, height, video_codec, time_base));
+ mux.reset(new Mux(avctx, width, height, video_codec, time_base, bit_rate));
}
ssize_t HTTPD::Stream::reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max)
#include "mux.h"
-class HTTPD {
+class HTTPD : public PacketDestination {
public:
HTTPD(int width, int height);
void start(int port);
- void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
+ void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts) override;
private:
static int answer_to_connection_thunk(void *cls, MHD_Connection *connection,
class Stream {
public:
- Stream(AVOutputFormat *oformat, int width, int height, int time_base);
+ Stream(AVOutputFormat *oformat, int width, int height, int time_base, int bit_rate);
static ssize_t reader_callback_thunk(void *cls, uint64_t pos, char *buf, size_t max);
ssize_t reader_callback(uint64_t pos, char *buf, size_t max);
using namespace std;
-Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base)
+Mux::Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base, int bit_rate)
: avctx(avctx)
{
AVCodec *codec_video = avcodec_find_encoder((video_codec == CODEC_H264) ? AV_CODEC_ID_H264 : AV_CODEC_ID_RAWVIDEO);
exit(1);
}
avstream_audio->time_base = AVRational{1, time_base};
- avstream_audio->codec->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+ avstream_audio->codec->bit_rate = bit_rate;
avstream_audio->codec->sample_rate = OUTPUT_FREQUENCY;
avstream_audio->codec->channels = 2;
avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
#include <libavformat/avio.h>
}
-class Mux {
+// Abstract sink for encoded AVPackets. Implemented by both Mux and HTTPD so
+// the encoder can hand one packet to several outputs through a single list.
+class PacketDestination {
+public:
+ // Virtual dtor: destinations may be deleted through a base pointer.
+ virtual ~PacketDestination() {}
+ // pts/dts are in the destination's own time base.
+ virtual void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts) = 0;
+};
+
+class Mux : public PacketDestination {
public:
enum Codec {
CODEC_H264,
CODEC_NV12, // Uncompressed 4:2:0.
};
- Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base); // Takes ownership of avctx.
+ // Takes ownership of avctx.
+ Mux(AVFormatContext *avctx, int width, int height, Codec video_codec, int time_base, int bit_rate);
~Mux();
- void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts)override;
private:
bool seen_keyframe = false;