#include <assert.h>
#include <epoxy/egl.h>
#include <libavcodec/avcodec.h>
-#include <libavformat/avio.h>
-#include <libavutil/mathematics.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/frame.h>
#include <libavutil/rational.h>
+#include <libavutil/samplefmt.h>
#include <libdrm/drm_fourcc.h>
-#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <va/va_enc_h264.h>
#include <va/va_x11.h>
#include <condition_variable>
+#include <cstdint>
+#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
+#include <utility>
#include "context.h"
+#include "defs.h"
#include "httpd.h"
#include "timebase.h"
static int frame_height = 144;
static int frame_width_mbaligned;
static int frame_height_mbaligned;
-static int frame_rate = 60;
static unsigned int frame_bitrate = 0;
static unsigned int frame_slices = 1;
static double frame_size = 0;
//static int initial_qp = 28;
static int minimal_qp = 0;
static int intra_period = 30;
-static int intra_idr_period = 60;
+static int intra_idr_period = MAX_FPS; // About a second; more at lower frame rates. Not ideal.
static int ip_period = 3;
static int rc_mode = -1;
static int rc_default_modes[] = {
using namespace std;
+// vaRenderPicture() is supposedly meant to destroy the buffer implicitly,
+// but if we don't delete it here, we get leaks. The GStreamer implementation
+// does the same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+ VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+ CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+ for (int i = 0; i < num_buffers; ++i) {
+		va_status = vaDestroyBuffer(dpy, buffers[i]);
+ CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+ }
+}
+
static unsigned int
va_swap32(unsigned int val)
{
bitstream_put_ui(bs, 1, 1); /* vui_parameters_present_flag */
bitstream_put_ui(bs, 0, 1); /* aspect_ratio_info_present_flag */
bitstream_put_ui(bs, 0, 1); /* overscan_info_present_flag */
- bitstream_put_ui(bs, 0, 1); /* video_signal_type_present_flag */
+ bitstream_put_ui(bs, 1, 1); /* video_signal_type_present_flag */
+ {
+ bitstream_put_ui(bs, 5, 3); /* video_format (5 = Unspecified) */
+ bitstream_put_ui(bs, 0, 1); /* video_full_range_flag */
+ bitstream_put_ui(bs, 1, 1); /* colour_description_present_flag */
+ {
+ bitstream_put_ui(bs, 1, 8); /* colour_primaries (1 = BT.709) */
+ bitstream_put_ui(bs, 2, 8); /* transfer_characteristics (2 = unspecified, since we use sRGB) */
+ bitstream_put_ui(bs, 6, 8); /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
+ }
+ }
bitstream_put_ui(bs, 0, 1); /* chroma_loc_info_present_flag */
bitstream_put_ui(bs, 1, 1); /* timing_info_present_flag */
{
//
// Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
// bit tricky. We assume first of all that the frame rate never goes _above_
-// <frame_rate>, which gives us a frame period N. The decoder can always decode
-// in at least this speed, as long at dts <= pts (the frame is not attempted
-// presented before it is decoded). Furthermore, we never have longer chains of
+// MAX_FPS, which gives us a frame period N. The decoder can always decode
+// at least this fast, as long as dts <= pts (the frame is never presented
+// before it is decoded). Furthermore, we never have longer chains of
// B-frames than a fixed constant C. (In a B-frame chain, we say that the base
*displaying_order = encoding_order;
// IDR frames are a special case; I honestly can't find the logic behind
// why this is the right thing, but it seems to line up nicely in practice :-)
- *pts_lag = TIMEBASE / frame_rate;
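+		// One full frame period. Purely as an illustration (the real values
+		// live in timebase.h and defs.h): with TIMEBASE = 90000 and
+		// MAX_FPS = 60, this would make pts_lag 1500 ticks.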
+ *pts_lag = TIMEBASE / MAX_FPS;
} else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
*frame_type = FRAME_B;
*displaying_order = encoding_order - 1;
}
if (frame_bitrate == 0)
- frame_bitrate = frame_width * frame_height * 12 * frame_rate / 50;
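+		// Raw 4:2:0 video is 12 bits per pixel, so this default targets
+		// roughly a 50:1 compression ratio.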
+ frame_bitrate = frame_width * frame_height * 12 * MAX_FPS / 50;
if (coded_fn == NULL) {
struct stat buf;
render_id[0] = seq_param_buf;
render_id[1] = rc_param_buf;
- va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
- CHECK_VASTATUS(va_status, "vaRenderPicture");;
+ render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
if (misc_priv_type != 0) {
va_status = vaCreateBuffer(va_dpy, context_id,
misc_param_tmp->data[0] = misc_priv_value;
vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
- va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
+ render_picture_and_delete(va_dpy, context_id, &misc_param_tmpbuf, 1);
}
return 0;
va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
sizeof(pic_param), 1, &pic_param, &pic_param_buf);
- CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+ CHECK_VASTATUS(va_status, "vaCreateBuffer");
- va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
- CHECK_VASTATUS(va_status, "vaRenderPicture");
+ render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
return 0;
}
render_id[0] = packedseq_para_bufid;
render_id[1] = packedseq_data_bufid;
- va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
- CHECK_VASTATUS(va_status, "vaRenderPicture");
+ render_picture_and_delete(va_dpy, context_id, render_id, 2);
free(packedseq_buffer);
render_id[0] = packedpic_para_bufid;
render_id[1] = packedpic_data_bufid;
- va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
- CHECK_VASTATUS(va_status, "vaRenderPicture");
+ render_picture_and_delete(va_dpy, context_id, render_id, 2);
free(packedpic_buffer);
render_id[0] = packedslice_para_bufid;
render_id[1] = packedslice_data_bufid;
- va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
- CHECK_VASTATUS(va_status, "vaRenderPicture");
+ render_picture_and_delete(va_dpy, context_id, render_id, 2);
free(packedslice_buffer);
}
va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-	CHECK_VASTATUS(va_status, "vaCreateBuffer");;
-	va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-	CHECK_VASTATUS(va_status, "vaRenderPicture");
-
+	CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+	render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
return 0;
}
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
{
VACodedBufferSegment *buf_list = NULL;
VAStatus va_status;
string data;
- const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate); // So we never get negative dts.
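+	// dts can trail pts by up to (ip_period - 1) frame periods (one full
+	// B-frame chain), so shifting both timestamps forward by that amount
+	// keeps dts non-negative from the very first frame.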
+ const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS); // So we never get negative dts.
va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
CHECK_VASTATUS(va_status, "vaMapBuffer");
AVPacket pkt;
memset(&pkt, 0, sizeof(pkt));
pkt.buf = nullptr;
- pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
- pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
pkt.size = data.size();
pkt.stream_index = 0;
pkt.flags = 0;
}
//pkt.duration = 1;
- httpd->add_packet(pkt);
- av_interleaved_write_frame(avctx, &pkt);
+ httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
}
// Encode and add all audio frames up to and including the pts of this video frame.
- // (They can never be queued to us after the video frame they belong to, only before.)
for ( ;; ) {
int64_t audio_pts;
std::vector<float> audio;
{
unique_lock<mutex> lock(frame_queue_mutex);
- if (pending_audio_frames.empty()) break;
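+			// Wait until there is audio to encode, or until we are asked to quit.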
+ frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+ if (copy_thread_should_quit) return;
auto it = pending_audio_frames.begin();
if (it->first > task.pts) break;
audio_pts = it->first;
audio = move(it->second);
pending_audio_frames.erase(it);
}
+
AVFrame *frame = avcodec_alloc_frame();
frame->nb_samples = audio.size() / 2;
- frame->format = AV_SAMPLE_FMT_FLT;
+ frame->format = AV_SAMPLE_FMT_S32;
frame->channel_layout = AV_CH_LAYOUT_STEREO;
- unique_ptr<float[]> planar_samples(new float[audio.size()]);
- avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
- for (int i = 0; i < frame->nb_samples; ++i) {
- planar_samples[i] = audio[i * 2 + 0];
- planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+ unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+ avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 0);
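+		// Convert the interleaved floats to signed 32-bit PCM, saturating
+		// anything outside [-1, 1] so the scaling by 2^31 - 1 cannot overflow.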
+ for (int i = 0; i < frame->nb_samples * 2; ++i) {
+ if (audio[i] >= 1.0f) {
+ int_samples[i] = 2147483647;
+ } else if (audio[i] <= -1.0f) {
+ int_samples[i] = -2147483647;
+ } else {
+ int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+ }
}
AVPacket pkt;
pkt.data = nullptr;
pkt.size = 0;
int got_output;
- avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
+ avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
if (got_output) {
- pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
- pkt.dts = pkt.pts;
pkt.stream_index = 1;
- httpd->add_packet(pkt);
- av_interleaved_write_frame(avctx, &pkt);
+ httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
}
// TODO: Delayed frames.
avcodec_free_frame(&frame);
+ av_free_packet(&pkt);
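+		// We are done once we have encoded the audio belonging to this video frame.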
+ if (audio_pts == task.pts) break;
}
#if 0
printf("%08lld", encode_order);
printf("(%06d bytes coded)", coded_size);
#endif
-
- return 0;
}
if (rc_mode != -1)
printf("INPUT: RateControl : %s\n", rc_to_string(rc_mode));
printf("INPUT: Resolution : %dx%dframes\n", frame_width, frame_height);
- printf("INPUT: FrameRate : %d\n", frame_rate);
printf("INPUT: Bitrate : %d\n", frame_bitrate);
printf("INPUT: Slieces : %d\n", frame_slices);
printf("INPUT: IntraPeriod : %d\n", intra_period);
return 0;
}
-
-//H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename)
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename, HTTPD *httpd)
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
: current_storage_frame(0), surface(surface), httpd(httpd)
- //: width(width), height(height), current_encoding_frame(0)
{
- avctx = avformat_alloc_context();
- avctx->oformat = av_guess_format(NULL, output_filename, NULL);
- strcpy(avctx->filename, output_filename);
- if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
- fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
- exit(1);
- }
- AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
- avstream_video = avformat_new_stream(avctx, codec_video);
- if (avstream_video == nullptr) {
- fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
- exit(1);
- }
- avstream_video->time_base = AVRational{1, TIMEBASE};
- avstream_video->codec->width = width;
- avstream_video->codec->height = height;
- avstream_video->codec->time_base = AVRational{1, TIMEBASE};
- avstream_video->codec->ticks_per_frame = 1; // or 2?
-
- AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
- avstream_audio = avformat_new_stream(avctx, codec_audio);
- if (avstream_audio == nullptr) {
- fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
- exit(1);
- }
- avstream_audio->time_base = AVRational{1, TIMEBASE};
- avstream_audio->codec->bit_rate = 256000;
- avstream_audio->codec->sample_rate = 48000;
- avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
- avstream_audio->codec->channels = 2;
- avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
- avstream_audio->codec->time_base = AVRational{1, TIMEBASE};
-
- /* open it */
- if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
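+	// The audio encoder now has its own AVCodecContext, decoupled from any
+	// output stream; codec, bit rate, sample rate and sample format come
+	// from compile-time constants (see defs.h, included above).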
+ AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
+ context_audio = avcodec_alloc_context3(codec_audio);
+ context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+ context_audio->sample_rate = OUTPUT_FREQUENCY;
+ context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+ context_audio->channels = 2;
+ context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
+ context_audio->time_base = AVRational{1, TIMEBASE};
+ if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
- if (avformat_write_header(avctx, NULL) < 0) {
- fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
- exit(1);
- }
-
frame_width = width;
frame_height = height;
frame_width_mbaligned = (frame_width + 15) & (~15);
copy_thread = std::thread([this]{
//SDL_GL_MakeCurrent(window, context);
- QOpenGLContext *context = create_context();
+ QOpenGLContext *context = create_context(this->surface);
eglBindAPI(EGL_OPENGL_API);
if (!make_current(context, this->surface)) {
printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
{
unique_lock<mutex> lock(frame_queue_mutex);
copy_thread_should_quit = true;
- frame_queue_nonempty.notify_one();
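+		// Use notify_all(), since more than one thread (the copy thread and
+		// the storage thread, via save_codeddata()) may be waiting on this
+		// condition variable.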
+ frame_queue_nonempty.notify_all();
}
storage_thread.join();
copy_thread.join();
release_encode();
deinit_va();
-
- av_write_trailer(avctx);
- avformat_free_context(avctx);
}
bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
unique_lock<mutex> lock(frame_queue_mutex);
pending_audio_frames[pts] = move(audio);
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
-
void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
{
{
pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
++current_storage_frame;
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
void H264Encoder::copy_thread_func()
int64_t dts;
if (pts_lag == -1) {
assert(last_dts != -1);
- dts = last_dts + (TIMEBASE / frame_rate);
+ dts = last_dts + (TIMEBASE / MAX_FPS);
} else {
dts = pts - pts_lag;
}