#include "httpd.h"
#include "timebase.h"
+using namespace std;
+
class QOpenGLContext;
class QSurface;
static int h264_maxref = (1<<16|1);
static int h264_entropy_mode = 1; /* cabac */
-static char *coded_fn = NULL;
-
static int frame_width = 176;
static int frame_height = 144;
static int frame_width_mbaligned;
static int frame_height_mbaligned;
-static int frame_rate = FPS;
static unsigned int frame_bitrate = 0;
-static unsigned int frame_slices = 1;
static double frame_size = 0;
static int initial_qp = 15;
//static int initial_qp = 28;
static int minimal_qp = 0;
static int intra_period = 30;
-static int intra_idr_period = FPS;
+static int intra_idr_period = MAX_FPS;  // About a second at MAX_FPS; longer (in wall-clock time) at lower frame rates. Not ideal.
static int ip_period = 3;
static int rc_mode = -1;
static int rc_default_modes[] = {
VA_RC_VCM,
VA_RC_NONE,
};
-static unsigned long long current_frame_encoding = 0;
static unsigned long long current_frame_display = 0;
static unsigned long long current_IDR_display = 0;
static unsigned int current_frame_num = 0;
//
// Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
// bit tricky. We assume first of all that the frame rate never goes _above_
-// <frame_rate>, which gives us a frame period N. The decoder can always decode
+// MAX_FPS, which gives us a frame period N. The decoder can always decode
// at least this fast, as long as dts <= pts (i.e., we never try to present
// a frame before it has been decoded). Furthermore, we never have longer chains of
// B-frames than a fixed constant C. (In a B-frame chain, we say that the base
*displaying_order = encoding_order;
// IDR frames are a special case; I honestly can't find the logic behind
// why this is the right thing, but it seems to line up nicely in practice :-)
- *pts_lag = TIMEBASE / frame_rate;
+ *pts_lag = TIMEBASE / MAX_FPS;
} else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
*frame_type = FRAME_B;
*displaying_order = encoding_order - 1;
}
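+	// A rough sketch of the reordering this produces, assuming the usual
+	// P-frame branch (not shown in this hunk) and ip_period = 3, i.e. two
+	// B-frames between consecutive P-frames:
+	//
+	//   encoding order:  0    1   2   3   4   5   6
+	//   frame type:      IDR  P   B   B   P   B   B
+	//   display order:   0    3   1   2   6   4   5
+	//
+	// Since a P-frame has to be decoded before the B-frames that precede it
+	// in display order, its pts can run ahead of its dts by up to
+	// (ip_period - 1) frame periods.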
if (frame_bitrate == 0)
- frame_bitrate = frame_width * frame_height * 12 * frame_rate / 50;
+ frame_bitrate = frame_width * frame_height * 12 * MAX_FPS / 50;
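+	// Ballpark only; e.g. 1280x720 with MAX_FPS = 60 (illustrative numbers)
+	// works out to 1280 * 720 * 12 * 60 / 50 ~= 13.3 Mbit/sec.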
if (coded_fn == NULL) {
struct stat buf;
} else {
switch (h264_profile) {
case VAProfileH264Baseline:
- printf("Use profile VAProfileH264Baseline\n");
ip_period = 1;
constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
h264_entropy_mode = 0;
break;
case VAProfileH264ConstrainedBaseline:
- printf("Use profile VAProfileH264ConstrainedBaseline\n");
constraint_set_flag |= (1 << 0 | 1 << 1); /* Annex A.2.2 */
ip_period = 1;
break;
case VAProfileH264Main:
- printf("Use profile VAProfileH264Main\n");
constraint_set_flag |= (1 << 1); /* Annex A.2.2 */
break;
case VAProfileH264High:
constraint_set_flag |= (1 << 3); /* Annex A.2.4 */
- printf("Use profile VAProfileH264High\n");
break;
default:
- printf("unknow profile. Set to Baseline");
h264_profile = VAProfileH264Baseline;
ip_period = 1;
constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
if (attrib[VAConfigAttribRateControl].value != VA_ATTRIB_NOT_SUPPORTED) {
int tmp = attrib[VAConfigAttribRateControl].value;
- printf("Support rate control mode (0x%x):", tmp);
-
- if (tmp & VA_RC_NONE)
- printf("NONE ");
- if (tmp & VA_RC_CBR)
- printf("CBR ");
- if (tmp & VA_RC_VBR)
- printf("VBR ");
- if (tmp & VA_RC_VCM)
- printf("VCM ");
- if (tmp & VA_RC_CQP)
- printf("CQP ");
- if (tmp & VA_RC_VBR_CONSTRAINED)
- printf("VBR_CONSTRAINED ");
-
- printf("\n");
-
if (rc_mode == -1 || !(rc_mode & tmp)) {
if (rc_mode != -1) {
printf("Warning: Don't support the specified RateControl mode: %s!!!, switch to ", rc_to_string(rc_mode));
break;
}
}
-
- printf("RateControl mode: %s\n", rc_to_string(rc_mode));
}
config_attrib[config_attrib_num].type = VAConfigAttribRateControl;
if (attrib[VAConfigAttribEncPackedHeaders].value != VA_ATTRIB_NOT_SUPPORTED) {
int tmp = attrib[VAConfigAttribEncPackedHeaders].value;
- printf("Support VAConfigAttribEncPackedHeaders\n");
-
h264_packedheader = 1;
config_attrib[config_attrib_num].type = VAConfigAttribEncPackedHeaders;
config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
if (tmp & VA_ENC_PACKED_HEADER_SEQUENCE) {
- printf("Support packed sequence headers\n");
config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SEQUENCE;
}
if (tmp & VA_ENC_PACKED_HEADER_PICTURE) {
- printf("Support packed picture headers\n");
config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_PICTURE;
}
if (tmp & VA_ENC_PACKED_HEADER_SLICE) {
- printf("Support packed slice headers\n");
config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SLICE;
}
if (tmp & VA_ENC_PACKED_HEADER_MISC) {
- printf("Support packed misc headers\n");
config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_MISC;
}
}
if (attrib[VAConfigAttribEncInterlaced].value != VA_ATTRIB_NOT_SUPPORTED) {
- int tmp = attrib[VAConfigAttribEncInterlaced].value;
-
- printf("Support VAConfigAttribEncInterlaced\n");
-
- if (tmp & VA_ENC_INTERLACED_FRAME)
- printf("support VA_ENC_INTERLACED_FRAME\n");
- if (tmp & VA_ENC_INTERLACED_FIELD)
- printf("Support VA_ENC_INTERLACED_FIELD\n");
- if (tmp & VA_ENC_INTERLACED_MBAFF)
- printf("Support VA_ENC_INTERLACED_MBAFF\n");
- if (tmp & VA_ENC_INTERLACED_PAFF)
- printf("Support VA_ENC_INTERLACED_PAFF\n");
-
config_attrib[config_attrib_num].type = VAConfigAttribEncInterlaced;
config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
config_attrib_num++;
if (attrib[VAConfigAttribEncMaxRefFrames].value != VA_ATTRIB_NOT_SUPPORTED) {
h264_maxref = attrib[VAConfigAttribEncMaxRefFrames].value;
-
- printf("Support %d RefPicList0 and %d RefPicList1\n",
- h264_maxref & 0xffff, (h264_maxref >> 16) & 0xffff );
- }
-
- if (attrib[VAConfigAttribEncMaxSlices].value != VA_ATTRIB_NOT_SUPPORTED)
- printf("Support %d slices\n", attrib[VAConfigAttribEncMaxSlices].value);
-
- if (attrib[VAConfigAttribEncSliceStructure].value != VA_ATTRIB_NOT_SUPPORTED) {
- int tmp = attrib[VAConfigAttribEncSliceStructure].value;
-
- printf("Support VAConfigAttribEncSliceStructure\n");
-
- if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS)
- printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS\n");
- if (tmp & VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS)
- printf("Support VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS\n");
- if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS)
- printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS\n");
- }
- if (attrib[VAConfigAttribEncMacroblockInfo].value != VA_ATTRIB_NOT_SUPPORTED) {
- printf("Support VAConfigAttribEncMacroblockInfo\n");
}
free(entrypoints);
free(packedslice_buffer);
}
-static int render_slice(void)
+static int render_slice(int encoding_frame_num)
{
VABufferID slice_param_buf;
VAStatus va_status;
slice_param.num_macroblocks = frame_width_mbaligned * frame_height_mbaligned/(16*16); /* Measured by MB */
slice_param.slice_type = (current_frame_type == FRAME_IDR)?2:current_frame_type;
if (current_frame_type == FRAME_IDR) {
- if (current_frame_encoding != 0)
+ if (encoding_frame_num != 0)
++slice_param.idr_pic_id;
} else if (current_frame_type == FRAME_P) {
int refpiclist0_max = h264_maxref & 0xffff;
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
{
VACodedBufferSegment *buf_list = NULL;
VAStatus va_status;
string data;
- const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate); // So we never get negative dts.
+ const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS); // So we never get negative dts.
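+	// For example, with ip_period = 3 this is two full frame periods; both pts
+	// and dts are shifted forward by this amount when the packet is handed to
+	// httpd below, so a dts that would otherwise start slightly below zero
+	// stays nonnegative.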
va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
CHECK_VASTATUS(va_status, "vaMapBuffer");
httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
}
// Encode and add all audio frames up to and including the pts of this video frame.
- // (They can never be queued to us after the video frame they belong to, only before.)
for ( ;; ) {
int64_t audio_pts;
- std::vector<float> audio;
+ vector<float> audio;
{
unique_lock<mutex> lock(frame_queue_mutex);
- if (pending_audio_frames.empty()) break;
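+			// The audio belonging to this video frame may arrive slightly after
+			// the frame itself, so we may have to block here until it shows up
+			// (or until we are asked to quit).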
+ frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+ if (copy_thread_should_quit) return;
auto it = pending_audio_frames.begin();
if (it->first > task.pts) break;
audio_pts = it->first;
frame->channel_layout = AV_CH_LAYOUT_STEREO;
unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
- avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 0);
+ int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+ if (ret < 0) {
+ fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+ exit(1);
+ }
for (int i = 0; i < frame->nb_samples * 2; ++i) {
if (audio[i] >= 1.0f) {
int_samples[i] = 2147483647;
// TODO: Delayed frames.
avcodec_free_frame(&frame);
av_free_packet(&pkt);
+ if (audio_pts == task.pts) break;
}
#if 0
printf("%08lld", encode_order);
printf("(%06d bytes coded)", coded_size);
#endif
-
- return 0;
}
// Puts a new frame onto the storage queue and marks its source surface as busy until that frame is done encoding.
void H264Encoder::storage_task_enqueue(storage_task task)
{
- std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+ unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue.push(move(task));
srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
storage_task_queue_changed.notify_all();
storage_task current;
{
// wait until there's an encoded frame
- std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+ unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || !storage_task_queue.empty(); });
if (storage_thread_should_quit) return;
current = move(storage_task_queue.front());
save_codeddata(move(current));
{
- std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+ unique_lock<mutex> lock(storage_task_queue_mutex);
srcsurface_status[current.display_order % SURFACE_NUM] = SRC_SURFACE_FREE;
storage_task_queue_changed.notify_all();
}
}
-static int print_input()
-{
- printf("\n\nINPUT:Try to encode H264...\n");
- if (rc_mode != -1)
- printf("INPUT: RateControl : %s\n", rc_to_string(rc_mode));
- printf("INPUT: Resolution : %dx%dframes\n", frame_width, frame_height);
- printf("INPUT: FrameRate : %d\n", frame_rate);
- printf("INPUT: Bitrate : %d\n", frame_bitrate);
- printf("INPUT: Slieces : %d\n", frame_slices);
- printf("INPUT: IntraPeriod : %d\n", intra_period);
- printf("INPUT: IDRPeriod : %d\n", intra_idr_period);
- printf("INPUT: IpPeriod : %d\n", ip_period);
- printf("INPUT: Initial QP : %d\n", initial_qp);
- printf("INPUT: Min QP : %d\n", minimal_qp);
- printf("INPUT: Coded Clip : %s\n", coded_fn);
-
- printf("\n\n"); /* return back to startpoint */
-
- return 0;
-}
-
H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
: current_storage_frame(0), surface(surface), httpd(httpd)
{
frame_width_mbaligned = (frame_width + 15) & (~15);
frame_height_mbaligned = (frame_height + 15) & (~15);
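+	// Round up to whole 16x16 macroblocks; e.g. 1366x768 becomes 1376x768,
+	// while the default 176x144 is already aligned.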
frame_bitrate = 15000000; // / 60;
- current_frame_encoding = 0;
- print_input();
+ //print_input();
init_va();
setup_encode();
memset(&pic_param, 0, sizeof(pic_param));
memset(&slice_param, 0, sizeof(slice_param));
- storage_thread = std::thread(&H264Encoder::storage_task_thread, this);
+ storage_thread = thread(&H264Encoder::storage_task_thread, this);
- copy_thread = std::thread([this]{
+ copy_thread = thread([this]{
//SDL_GL_MakeCurrent(window, context);
QOpenGLContext *context = create_context(this->surface);
eglBindAPI(EGL_OPENGL_API);
{
unique_lock<mutex> lock(frame_queue_mutex);
copy_thread_should_quit = true;
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
storage_thread.join();
copy_thread.join();
{
{
// Wait until this frame slot is done encoding.
- std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+ unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
if (storage_thread_should_quit) return false;
}
return true;
}
-void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+void H264Encoder::add_audio(int64_t pts, vector<float> audio)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
pending_audio_frames[pts] = move(audio);
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
-
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
++current_storage_frame;
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
void H264Encoder::copy_thread_func()
{
int64_t last_dts = -1;
- for ( ;; ) {
+ for (int encoding_frame_num = 0; ; ++encoding_frame_num) {
PendingFrame frame;
int pts_lag;
- encoding2display_order(current_frame_encoding, intra_period, intra_idr_period, ip_period,
+ encoding2display_order(encoding_frame_num, intra_period, intra_idr_period, ip_period,
			&current_frame_display, &current_frame_type, &pts_lag);
if (current_frame_type == FRAME_IDR) {
numShortTerm = 0;
{
unique_lock<mutex> lock(frame_queue_mutex);
frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_video_frames.count(current_frame_display) != 0; });
- if (copy_thread_should_quit) return;
- frame = move(pending_video_frames[current_frame_display]);
- pending_video_frames.erase(current_frame_display);
- }
-
- // Wait for the GPU to be done with the frame.
- glClientWaitSync(frame.fence.get(), 0, 0);
-
- // Release back any input frames we needed to render this frame.
- frame.input_frames.clear();
-
- // Unmap the image.
- GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
- eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
- eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
- VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
- CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
- va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
- CHECK_VASTATUS(va_status, "vaDestroyImage");
-
- VASurfaceID surface = surf->src_surface;
-
- // Schedule the frame for encoding.
- va_status = vaBeginPicture(va_dpy, context_id, surface);
- CHECK_VASTATUS(va_status, "vaBeginPicture");
-
- if (current_frame_type == FRAME_IDR) {
- render_sequence();
- render_picture();
- if (h264_packedheader) {
- render_packedsequence();
- render_packedpicture();
+ if (copy_thread_should_quit) {
+ return;
+ } else {
+ frame = move(pending_video_frames[current_frame_display]);
+ pending_video_frames.erase(current_frame_display);
}
- } else {
- //render_sequence();
- render_picture();
}
- render_slice();
-
- va_status = vaEndPicture(va_dpy, context_id);
- CHECK_VASTATUS(va_status, "vaEndPicture");
- // Determine the pts and dts of this frame.
- int64_t pts = frame.pts;
+ // Determine the dts of this frame.
int64_t dts;
if (pts_lag == -1) {
assert(last_dts != -1);
- dts = last_dts + (TIMEBASE / frame_rate);
+ dts = last_dts + (TIMEBASE / MAX_FPS);
} else {
- dts = pts - pts_lag;
+ dts = frame.pts - pts_lag;
}
last_dts = dts;
- // so now the data is done encoding (well, async job kicked off)...
- // we send that to the storage thread
- storage_task tmp;
- tmp.display_order = current_frame_display;
- tmp.frame_type = current_frame_type;
- tmp.pts = pts;
- tmp.dts = dts;
- storage_task_enqueue(move(tmp));
-
- update_ReferenceFrames();
- ++current_frame_encoding;
+ encode_frame(frame, encoding_frame_num, frame.pts, dts);
}
}
+
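+// Does the actual rendering and VA-API submission for a single frame, and then
+// hands the bookkeeping (display order, frame type, pts/dts) over to the storage
+// thread. Called from the copy thread's main loop above.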
+void H264Encoder::encode_frame(H264Encoder::PendingFrame frame, int encoding_frame_num, int64_t pts, int64_t dts)
+{
+ // Wait for the GPU to be done with the frame.
+ glClientWaitSync(frame.fence.get(), 0, 0);
+
+ // Release back any input frames we needed to render this frame.
+ frame.input_frames.clear();
+
+ // Unmap the image.
+ GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
+ eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
+ eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
+ VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
+ CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+ va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
+ CHECK_VASTATUS(va_status, "vaDestroyImage");
+
+ VASurfaceID surface = surf->src_surface;
+
+ // Schedule the frame for encoding.
+ va_status = vaBeginPicture(va_dpy, context_id, surface);
+ CHECK_VASTATUS(va_status, "vaBeginPicture");
+
+ if (current_frame_type == FRAME_IDR) {
+ render_sequence();
+ render_picture();
+ if (h264_packedheader) {
+ render_packedsequence();
+ render_packedpicture();
+ }
+ } else {
+ //render_sequence();
+ render_picture();
+ }
+ render_slice(encoding_frame_num);
+
+ va_status = vaEndPicture(va_dpy, context_id);
+ CHECK_VASTATUS(va_status, "vaEndPicture");
+
+	// The encode job for this frame has now been kicked off (asynchronously);
+	// hand the bookkeeping over to the storage thread.
+ storage_task tmp;
+ tmp.display_order = current_frame_display;
+ tmp.frame_type = current_frame_type;
+ tmp.pts = pts;
+ tmp.dts = dts;
+ storage_task_enqueue(move(tmp));
+
+ update_ReferenceFrames();
+}