#include <X11/Xlib.h>
#include <assert.h>
#include <epoxy/egl.h>
+extern "C" {
#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/rational.h>
#include <libavutil/samplefmt.h>
+}
#include <libdrm/drm_fourcc.h>
#include <stdio.h>
#include <stdlib.h>
#include <va/va_x11.h>
#include <condition_variable>
#include <cstdint>
+#include <map>
#include <memory>
#include <mutex>
#include <queue>
VA_RC_VCM,
VA_RC_NONE,
};
-static unsigned long long current_frame_encoding = 0;
-static unsigned long long current_frame_display = 0;
-static unsigned long long current_IDR_display = 0;
static unsigned int current_frame_num = 0;
-static int current_frame_type;
static int misc_priv_type = 0;
static int misc_priv_value = 0;
using namespace std;
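+// Pimpl body of H264Encoder (see the thin proxy class near the end of this file).
+// Rough data flow, as far as it can be read from the code below: the caller feeds
+// video through begin_frame()/end_frame() and audio through add_audio(); completed
+// frames land in pending_video_frames (under frame_queue_mutex), the encode thread
+// pulls them out in encoding order and submits them to VA-API, and finished encode
+// jobs are handed to the storage thread via storage_task_queue (under
+// storage_task_queue_mutex).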
+class H264EncoderImpl {
+public:
+ H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd);
+ ~H264EncoderImpl();
+ void add_audio(int64_t pts, std::vector<float> audio); // Needs to come before end_frame() of same pts.
+ bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
+ void end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames);
+
+private:
+ struct storage_task {
+ unsigned long long display_order;
+ int frame_type;
+ std::vector<float> audio;
+ int64_t pts, dts;
+ };
+ struct PendingFrame {
+ RefCountedGLsync fence;
+ std::vector<RefCountedFrame> input_frames;
+ int64_t pts;
+ };
+
+ void encode_thread_func();
+ void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+ void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
+ int frame_type, int64_t pts, int64_t dts);
+ void storage_task_thread();
+ void storage_task_enqueue(storage_task task);
+ void save_codeddata(storage_task task);
+
+ std::thread encode_thread, storage_thread;
+
+ std::mutex storage_task_queue_mutex;
+ std::condition_variable storage_task_queue_changed;
+ int srcsurface_status[SURFACE_NUM]; // protected by storage_task_queue_mutex
+ std::queue<storage_task> storage_task_queue; // protected by storage_task_queue_mutex
+ bool storage_thread_should_quit = false; // protected by storage_task_queue_mutex
+
+ std::mutex frame_queue_mutex;
+ std::condition_variable frame_queue_nonempty;
+ bool encode_thread_should_quit = false; // under frame_queue_mutex
+
+ //int frame_width, frame_height;
+ int current_storage_frame;
+
+ std::map<int, PendingFrame> pending_video_frames; // under frame_queue_mutex
+ std::map<int64_t, std::vector<float>> pending_audio_frames; // under frame_queue_mutex
+ QSurface *surface;
+
+ AVCodecContext *context_audio;
+ HTTPD *httpd;
+};
+
+
// vaRenderPicture() is supposed to destroy the buffer implicitly, but if we
// don't delete it here, we get leaks. The GStreamer implementation does the same.
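// (Illustrative sketch only, not the elided code itself: the pattern referred to
// above is to destroy the parameter buffer explicitly right after submitting it,
// along the lines of
//     vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
//     vaDestroyBuffer(va_dpy, pic_param_buf);  // explicit delete; avoids the leak
// where pic_param_buf is the VABufferID that was just rendered.)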
#define FRAME_I 2
#define FRAME_IDR 7
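// encoding2display_order() maps a frame's position in encoding order to its position
// in display order, plus its frame type and pts lag. Assuming the usual libva-style
// IDR (PBB)(PBB)... reordering with ip_period == 3, encoding order 0 1 2 3 4 5 6
// corresponds to display order 0 3 1 2 6 4 5: each P-frame is encoded before the
// B-frames that precede it in display order.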
void encoding2display_order(
- unsigned long long encoding_order, int intra_period,
+ int encoding_order, int intra_period,
int intra_idr_period, int ip_period,
- unsigned long long *displaying_order,
+ int *displaying_order,
int *frame_type, int *pts_lag)
{
int encoding_order_gop = 0;
sort_one(ref, j+1, right, list1_ascending, frame_idx);
}
-static int update_ReferenceFrames(void)
+static void update_ReferenceFrames(int frame_type)
{
int i;
- if (current_frame_type == FRAME_B)
- return 0;
+ if (frame_type == FRAME_B)
+ return;
CurrentCurrPic.flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
numShortTerm++;
ReferenceFrames[i] = ReferenceFrames[i-1];
ReferenceFrames[0] = CurrentCurrPic;
- if (current_frame_type != FRAME_B)
- current_frame_num++;
+ current_frame_num++;
if (current_frame_num > MaxFrameNum)
current_frame_num = 0;
-
- return 0;
}
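// (Summary of update_ReferenceFrames(): B-frames are never used as references, so
//  they return early; for all other frames, the just-encoded picture is pushed to
//  the front of the short-term reference list, older entries slide one slot back,
//  and frame_num is incremented, wrapping at MaxFrameNum.)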
-static int update_RefPicList(void)
+static int update_RefPicList(int frame_type)
{
unsigned int current_poc = CurrentCurrPic.TopFieldOrderCnt;
- if (current_frame_type == FRAME_P) {
+ if (frame_type == FRAME_P) {
memcpy(RefPicList0_P, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
sort_one(RefPicList0_P, 0, numShortTerm-1, 0, 1);
}
- if (current_frame_type == FRAME_B) {
+ if (frame_type == FRAME_B) {
memcpy(RefPicList0_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
sort_two(RefPicList0_B, 0, numShortTerm-1, current_poc, 0,
1, 0, 1);
return 0;
}
-static int calc_poc(int pic_order_cnt_lsb)
+static int calc_poc(int pic_order_cnt_lsb, int frame_type)
{
static int PicOrderCntMsb_ref = 0, pic_order_cnt_lsb_ref = 0;
int prevPicOrderCntMsb, prevPicOrderCntLsb;
int PicOrderCntMsb, TopFieldOrderCnt;
- if (current_frame_type == FRAME_IDR)
+ if (frame_type == FRAME_IDR)
prevPicOrderCntMsb = prevPicOrderCntLsb = 0;
else {
prevPicOrderCntMsb = PicOrderCntMsb_ref;
TopFieldOrderCnt = PicOrderCntMsb + pic_order_cnt_lsb;
- if (current_frame_type != FRAME_B) {
+ if (frame_type != FRAME_B) {
PicOrderCntMsb_ref = PicOrderCntMsb;
pic_order_cnt_lsb_ref = pic_order_cnt_lsb;
}
return TopFieldOrderCnt;
}
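// (For reference, the PicOrderCntMsb update elided above presumably follows the
// standard POC type 0 derivation from the H.264 spec, section 8.2.1.1:
//     if (lsb < prevLsb && (prevLsb - lsb) >= MaxPicOrderCntLsb / 2)
//         PicOrderCntMsb = prevMsb + MaxPicOrderCntLsb;
//     else if (lsb > prevLsb && (lsb - prevLsb) > MaxPicOrderCntLsb / 2)
//         PicOrderCntMsb = prevMsb - MaxPicOrderCntLsb;
//     else
//         PicOrderCntMsb = prevMsb;
// after which TopFieldOrderCnt = PicOrderCntMsb + pic_order_cnt_lsb, as above.)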
-static int render_picture(void)
+static int render_picture(int frame_type, int display_frame_num, int gop_start_display_frame_num)
{
VABufferID pic_param_buf;
VAStatus va_status;
int i = 0;
- pic_param.CurrPic.picture_id = gl_surfaces[current_frame_display % SURFACE_NUM].ref_surface;
+ pic_param.CurrPic.picture_id = gl_surfaces[display_frame_num % SURFACE_NUM].ref_surface;
pic_param.CurrPic.frame_idx = current_frame_num;
pic_param.CurrPic.flags = 0;
- pic_param.CurrPic.TopFieldOrderCnt = calc_poc((current_frame_display - current_IDR_display) % MaxPicOrderCntLsb);
+ pic_param.CurrPic.TopFieldOrderCnt = calc_poc((display_frame_num - gop_start_display_frame_num) % MaxPicOrderCntLsb, frame_type);
pic_param.CurrPic.BottomFieldOrderCnt = pic_param.CurrPic.TopFieldOrderCnt;
CurrentCurrPic = pic_param.CurrPic;
- if (getenv("TO_DEL")) { /* set RefPicList into ReferenceFrames */
- update_RefPicList(); /* calc RefPicList */
- memset(pic_param.ReferenceFrames, 0xff, 16 * sizeof(VAPictureH264)); /* invalid all */
- if (current_frame_type == FRAME_P) {
- pic_param.ReferenceFrames[0] = RefPicList0_P[0];
- } else if (current_frame_type == FRAME_B) {
- pic_param.ReferenceFrames[0] = RefPicList0_B[0];
- pic_param.ReferenceFrames[1] = RefPicList1_B[0];
- }
- } else {
- memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
- for (i = numShortTerm; i < SURFACE_NUM; i++) {
- pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
- pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
- }
+ memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
+ for (i = numShortTerm; i < SURFACE_NUM; i++) {
+ pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
+ pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
}
- pic_param.pic_fields.bits.idr_pic_flag = (current_frame_type == FRAME_IDR);
- pic_param.pic_fields.bits.reference_pic_flag = (current_frame_type != FRAME_B);
+ pic_param.pic_fields.bits.idr_pic_flag = (frame_type == FRAME_IDR);
+ pic_param.pic_fields.bits.reference_pic_flag = (frame_type != FRAME_B);
pic_param.pic_fields.bits.entropy_coding_mode_flag = h264_entropy_mode;
pic_param.pic_fields.bits.deblocking_filter_control_present_flag = 1;
pic_param.frame_num = current_frame_num;
- pic_param.coded_buf = gl_surfaces[current_frame_display % SURFACE_NUM].coded_buf;
+ pic_param.coded_buf = gl_surfaces[display_frame_num % SURFACE_NUM].coded_buf;
pic_param.last_picture = false; // FIXME
pic_param.pic_init_qp = initial_qp;
free(packedslice_buffer);
}
-static int render_slice(void)
+static int render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type)
{
VABufferID slice_param_buf;
VAStatus va_status;
int i;
- update_RefPicList();
+ update_RefPicList(frame_type);
/* one frame, one slice */
slice_param.macroblock_address = 0;
slice_param.num_macroblocks = frame_width_mbaligned * frame_height_mbaligned/(16*16); /* Measured by MB */
- slice_param.slice_type = (current_frame_type == FRAME_IDR)?2:current_frame_type;
- if (current_frame_type == FRAME_IDR) {
- if (current_frame_encoding != 0)
+ slice_param.slice_type = (frame_type == FRAME_IDR)?2:frame_type;
+ if (frame_type == FRAME_IDR) {
+ if (encoding_frame_num != 0)
++slice_param.idr_pic_id;
- } else if (current_frame_type == FRAME_P) {
+ } else if (frame_type == FRAME_P) {
int refpiclist0_max = h264_maxref & 0xffff;
memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
}
- } else if (current_frame_type == FRAME_B) {
+ } else if (frame_type == FRAME_B) {
int refpiclist0_max = h264_maxref & 0xffff;
int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
slice_param.slice_alpha_c0_offset_div2 = 0;
slice_param.slice_beta_offset_div2 = 0;
slice_param.direct_spatial_mv_pred_flag = 1;
- slice_param.pic_order_cnt_lsb = (current_frame_display - current_IDR_display) % MaxPicOrderCntLsb;
+ slice_param.pic_order_cnt_lsb = (display_frame_num - gop_start_display_frame_num) % MaxPicOrderCntLsb;
if (h264_packedheader &&
-void H264Encoder::save_codeddata(storage_task task)
+void H264EncoderImpl::save_codeddata(storage_task task)
{
VACodedBufferSegment *buf_list = NULL;
VAStatus va_status;
vector<float> audio;
{
unique_lock<mutex> lock(frame_queue_mutex);
- frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
- if (copy_thread_should_quit) return;
+ frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+ if (storage_thread_should_quit && pending_audio_frames.empty()) return;
auto it = pending_audio_frames.begin();
if (it->first > task.pts) break;
audio_pts = it->first;
// This is weird, but it seems to put a new frame onto the queue.
-void H264Encoder::storage_task_enqueue(storage_task task)
+void H264EncoderImpl::storage_task_enqueue(storage_task task)
{
unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue.push(move(task));
storage_task_queue_changed.notify_all();
}
-void H264Encoder::storage_task_thread()
+void H264EncoderImpl::storage_task_thread()
{
for ( ;; ) {
storage_task current;
// wait until there's an encoded frame
unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || !storage_task_queue.empty(); });
- if (storage_thread_should_quit) return;
+ if (storage_thread_should_quit && storage_task_queue.empty()) return;
current = move(storage_task_queue.front());
storage_task_queue.pop();
}
}
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd)
: current_storage_frame(0), surface(surface), httpd(httpd)
{
AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
frame_width_mbaligned = (frame_width + 15) & (~15);
frame_height_mbaligned = (frame_height + 15) & (~15);
frame_bitrate = 15000000; // / 60;
- current_frame_encoding = 0;
//print_input();
memset(&pic_param, 0, sizeof(pic_param));
memset(&slice_param, 0, sizeof(slice_param));
- storage_thread = thread(&H264Encoder::storage_task_thread, this);
+ storage_thread = thread(&H264EncoderImpl::storage_task_thread, this);
- copy_thread = thread([this]{
+ encode_thread = thread([this]{
//SDL_GL_MakeCurrent(window, context);
QOpenGLContext *context = create_context(this->surface);
eglBindAPI(EGL_OPENGL_API);
eglGetError());
exit(1);
}
- copy_thread_func();
+ encode_thread_func();
});
}
-H264Encoder::~H264Encoder()
+H264EncoderImpl::~H264EncoderImpl()
{
{
- unique_lock<mutex> lock(storage_task_queue_mutex);
- storage_thread_should_quit = true;
- storage_task_queue_changed.notify_all();
+ unique_lock<mutex> lock(frame_queue_mutex);
+ encode_thread_should_quit = true;
+ frame_queue_nonempty.notify_all();
}
+ encode_thread.join();
{
- unique_lock<mutex> lock(frame_queue_mutex);
- copy_thread_should_quit = true;
+ unique_lock<mutex> lock(storage_task_queue_mutex);
+ storage_thread_should_quit = true;
frame_queue_nonempty.notify_all();
+ storage_task_queue_changed.notify_all();
}
storage_thread.join();
- copy_thread.join();
release_encode();
deinit_va();
}
-bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
{
{
// Wait until this frame slot is done encoding.
return true;
}
-void H264Encoder::add_audio(int64_t pts, vector<float> audio)
+void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
frame_queue_nonempty.notify_all();
}
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
frame_queue_nonempty.notify_all();
}
-void H264Encoder::copy_thread_func()
+void H264EncoderImpl::encode_thread_func()
{
int64_t last_dts = -1;
- for ( ;; ) {
+ int gop_start_display_frame_num = 0;
+ for (int encoding_frame_num = 0; ; ++encoding_frame_num) {
PendingFrame frame;
int pts_lag;
- encoding2display_order(current_frame_encoding, intra_period, intra_idr_period, ip_period,
- &current_frame_display, &current_frame_type, &pts_lag);
- if (current_frame_type == FRAME_IDR) {
+ int frame_type, display_frame_num;
+ encoding2display_order(encoding_frame_num, intra_period, intra_idr_period, ip_period,
+ &display_frame_num, &frame_type, &pts_lag);
+ if (frame_type == FRAME_IDR) {
numShortTerm = 0;
current_frame_num = 0;
- current_IDR_display = current_frame_display;
+ gop_start_display_frame_num = display_frame_num;
}
{
unique_lock<mutex> lock(frame_queue_mutex);
- frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_video_frames.count(current_frame_display) != 0; });
- if (copy_thread_should_quit) {
+ frame_queue_nonempty.wait(lock, [this, display_frame_num]{
+ return encode_thread_should_quit || pending_video_frames.count(display_frame_num) != 0;
+ });
+ if (encode_thread_should_quit && pending_video_frames.count(display_frame_num) == 0) {
+ // We have queued frames that were supposed to be B-frames,
+ // but there will be no P-frame to encode them against. Encode them all
+ // as P-frames instead. Note that this happens under the mutex,
+ // but nobody else uses it at this point, since we're shutting down,
+ // so there's no contention.
+ encode_remaining_frames_as_p(encoding_frame_num, gop_start_display_frame_num, last_dts);
return;
} else {
- frame = move(pending_video_frames[current_frame_display]);
- pending_video_frames.erase(current_frame_display);
+ frame = move(pending_video_frames[display_frame_num]);
+ pending_video_frames.erase(display_frame_num);
}
}
}
last_dts = dts;
- encode_frame(frame, frame.pts, dts);
- ++current_frame_encoding;
+ encode_frame(frame, encoding_frame_num, display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts);
}
}
-void H264Encoder::encode_frame(H264Encoder::PendingFrame frame, int64_t pts, int64_t dts)
+void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts)
+{
+ if (pending_video_frames.empty()) {
+ return;
+ }
+
+ for (auto &pending_frame : pending_video_frames) {
+ int display_frame_num = pending_frame.first;
+ assert(display_frame_num > 0);
+ PendingFrame frame = move(pending_frame.second);
+ int64_t dts = last_dts + (TIMEBASE / MAX_FPS);
+ printf("Finalizing encode: Encoding leftover frame %d as P-frame instead of B-frame.\n", display_frame_num);
+ encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
+ last_dts = dts;
+ }
+}
+
+void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
+ int frame_type, int64_t pts, int64_t dts)
{
// Wait for the GPU to be done with the frame.
glClientWaitSync(frame.fence.get(), 0, 0);
frame.input_frames.clear();
// Unmap the image.
- GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
+ GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
va_status = vaBeginPicture(va_dpy, context_id, surface);
CHECK_VASTATUS(va_status, "vaBeginPicture");
- if (current_frame_type == FRAME_IDR) {
+ if (frame_type == FRAME_IDR) {
render_sequence();
- render_picture();
+ render_picture(frame_type, display_frame_num, gop_start_display_frame_num);
if (h264_packedheader) {
render_packedsequence();
render_packedpicture();
}
} else {
//render_sequence();
- render_picture();
+ render_picture(frame_type, display_frame_num, gop_start_display_frame_num);
}
- render_slice();
+ render_slice(encoding_frame_num, display_frame_num, gop_start_display_frame_num, frame_type);
va_status = vaEndPicture(va_dpy, context_id);
CHECK_VASTATUS(va_status, "vaEndPicture");
// so now the data is done encoding (well, async job kicked off)...
// we send that to the storage thread
storage_task tmp;
- tmp.display_order = current_frame_display;
- tmp.frame_type = current_frame_type;
+ tmp.display_order = display_frame_num;
+ tmp.frame_type = frame_type;
tmp.pts = pts;
tmp.dts = dts;
storage_task_enqueue(move(tmp));
- update_ReferenceFrames();
+ update_ReferenceFrames(frame_type);
+}
+
+// Proxy object.
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
+ : impl(new H264EncoderImpl(surface, width, height, httpd)) {}
+
+// Must be defined here because unique_ptr<> destructor needs to know the impl.
+H264Encoder::~H264Encoder() {}
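+// (For context, an assumed sketch of the header side of the pimpl: the header
+//  presumably only forward-declares the impl and holds it by smart pointer, e.g.
+//      class H264EncoderImpl;
+//      std::unique_ptr<H264EncoderImpl> impl;
+//  so the destructor has to live here, where H264EncoderImpl is a complete type.)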
+
+void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+{
+ impl->add_audio(pts, audio);
}
+
+bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+{
+ return impl->begin_frame(y_tex, cbcr_tex);
+}
+
+void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+{
+ impl->end_frame(fence, pts, input_frames);
+}
+
+// Real class.