Small refactoring in H264Encoder::copy_thread_func().

diff --git a/h264encode.cpp b/h264encode.cpp
index ad688fc58ee87b59e5232b94ad75bc4c24b36ad1..4d7882a090f9f5d5c27953655b64c4d4d80023e9 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
 #include <utility>
 
 #include "context.h"
+#include "defs.h"
 #include "httpd.h"
 #include "timebase.h"
 
+using namespace std;
+
 class QOpenGLContext;
 class QSurface;
 
@@ -107,21 +110,17 @@ static  int h264_packedheader = 0; /* support pack header? */
 static  int h264_maxref = (1<<16|1);
 static  int h264_entropy_mode = 1; /* cabac */
 
-static  char *coded_fn = NULL;
-
 static  int frame_width = 176;
 static  int frame_height = 144;
 static  int frame_width_mbaligned;
 static  int frame_height_mbaligned;
-static  int frame_rate = 60;
 static  unsigned int frame_bitrate = 0;
-static  unsigned int frame_slices = 1;
 static  double frame_size = 0;
 static  int initial_qp = 15;
 //static  int initial_qp = 28;
 static  int minimal_qp = 0;
 static  int intra_period = 30;
-static  int intra_idr_period = 60;
+static  int intra_idr_period = MAX_FPS;  // About a second; more at lower frame rates. Not ideal.
 static  int ip_period = 3;
 static  int rc_mode = -1;
 static  int rc_default_modes[] = {
@@ -154,6 +153,20 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// vaRenderPicture() is supposedly meant to destroy the buffers implicitly,
+// but if we don't destroy them explicitly here, we get leaks. The GStreamer
+// implementation does the same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+    VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+    for (int i = 0; i < num_buffers; ++i) {
+        va_status = vaDestroyBuffer(dpy, buffers[i]);
+        CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+    }
+}
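+
+// A typical call site batches the parameter buffers and hands them off in
+// one go (cf. render_sequence() below):
+//
+//   VABufferID render_id[2] = { seq_param_buf, rc_param_buf };
+//   render_picture_and_delete(va_dpy, context_id, render_id, 2);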
+
 static unsigned int 
 va_swap32(unsigned int val)
 {
@@ -604,7 +617,7 @@ static int build_packed_slice_buffer(unsigned char **header_buffer)
 //
 // Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
 // bit tricky. We assume first of all that the frame rate never goes _above_
-// <frame_rate>, which gives us a frame period N. The decoder can always decode
+// MAX_FPS, which gives us a frame period N. The decoder can always decode
 // at least this fast, as long as dts <= pts (a frame is never presented
 // before it is decoded). Furthermore, we never have longer chains of
 // B-frames than a fixed constant C. (In a B-frame chain, we say that the base
@@ -697,7 +710,7 @@ void encoding2display_order(
         *displaying_order = encoding_order;
         // IDR frames are a special case; I honestly can't find the logic behind
         // why this is the right thing, but it seems to line up nicely in practice :-)
-        *pts_lag = TIMEBASE / frame_rate;
+        *pts_lag = TIMEBASE / MAX_FPS;
     } else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
         *frame_type = FRAME_B;
         *displaying_order = encoding_order - 1;
@@ -858,7 +871,7 @@ static int process_cmdline(int argc, char *argv[])
     }
 
     if (frame_bitrate == 0)
-        frame_bitrate = frame_width * frame_height * 12 * frame_rate / 50;
+        frame_bitrate = frame_width * frame_height * 12 * MAX_FPS / 50;
         
     if (coded_fn == NULL) {
         struct stat buf;
@@ -958,28 +971,23 @@ static int init_va(void)
     } else {
         switch (h264_profile) {
             case VAProfileH264Baseline:
-                printf("Use profile VAProfileH264Baseline\n");
                 ip_period = 1;
                 constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
                 h264_entropy_mode = 0;
                 break;
             case VAProfileH264ConstrainedBaseline:
-                printf("Use profile VAProfileH264ConstrainedBaseline\n");
                 constraint_set_flag |= (1 << 0 | 1 << 1); /* Annex A.2.2 */
                 ip_period = 1;
                 break;
 
             case VAProfileH264Main:
-                printf("Use profile VAProfileH264Main\n");
                 constraint_set_flag |= (1 << 1); /* Annex A.2.2 */
                 break;
 
             case VAProfileH264High:
                 constraint_set_flag |= (1 << 3); /* Annex A.2.4 */
-                printf("Use profile VAProfileH264High\n");
                 break;
             default:
-                printf("unknow profile. Set to Baseline");
                 h264_profile = VAProfileH264Baseline;
                 ip_period = 1;
                 constraint_set_flag |= (1 << 0); /* Annex A.2.1 */
@@ -1009,23 +1017,6 @@ static int init_va(void)
     if (attrib[VAConfigAttribRateControl].value != VA_ATTRIB_NOT_SUPPORTED) {
         int tmp = attrib[VAConfigAttribRateControl].value;
 
-        printf("Support rate control mode (0x%x):", tmp);
-        
-        if (tmp & VA_RC_NONE)
-            printf("NONE ");
-        if (tmp & VA_RC_CBR)
-            printf("CBR ");
-        if (tmp & VA_RC_VBR)
-            printf("VBR ");
-        if (tmp & VA_RC_VCM)
-            printf("VCM ");
-        if (tmp & VA_RC_CQP)
-            printf("CQP ");
-        if (tmp & VA_RC_VBR_CONSTRAINED)
-            printf("VBR_CONSTRAINED ");
-
-        printf("\n");
-
         if (rc_mode == -1 || !(rc_mode & tmp))  {
             if (rc_mode != -1) {
                 printf("Warning: Don't support the specified RateControl mode: %s!!!, switch to ", rc_to_string(rc_mode));
@@ -1037,8 +1028,6 @@ static int init_va(void)
                     break;
                 }
             }
-
-            printf("RateControl mode: %s\n", rc_to_string(rc_mode));
         }
 
         config_attrib[config_attrib_num].type = VAConfigAttribRateControl;
@@ -1050,29 +1039,23 @@ static int init_va(void)
     if (attrib[VAConfigAttribEncPackedHeaders].value != VA_ATTRIB_NOT_SUPPORTED) {
         int tmp = attrib[VAConfigAttribEncPackedHeaders].value;
 
-        printf("Support VAConfigAttribEncPackedHeaders\n");
-        
         h264_packedheader = 1;
         config_attrib[config_attrib_num].type = VAConfigAttribEncPackedHeaders;
         config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
         
         if (tmp & VA_ENC_PACKED_HEADER_SEQUENCE) {
-            printf("Support packed sequence headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SEQUENCE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_PICTURE) {
-            printf("Support packed picture headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_PICTURE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_SLICE) {
-            printf("Support packed slice headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_SLICE;
         }
         
         if (tmp & VA_ENC_PACKED_HEADER_MISC) {
-            printf("Support packed misc headers\n");
             config_attrib[config_attrib_num].value |= VA_ENC_PACKED_HEADER_MISC;
         }
         
@@ -1081,19 +1064,6 @@ static int init_va(void)
     }
 
     if (attrib[VAConfigAttribEncInterlaced].value != VA_ATTRIB_NOT_SUPPORTED) {
-        int tmp = attrib[VAConfigAttribEncInterlaced].value;
-        
-        printf("Support VAConfigAttribEncInterlaced\n");
-
-        if (tmp & VA_ENC_INTERLACED_FRAME)
-            printf("support VA_ENC_INTERLACED_FRAME\n");
-        if (tmp & VA_ENC_INTERLACED_FIELD)
-            printf("Support VA_ENC_INTERLACED_FIELD\n");
-        if (tmp & VA_ENC_INTERLACED_MBAFF)
-            printf("Support VA_ENC_INTERLACED_MBAFF\n");
-        if (tmp & VA_ENC_INTERLACED_PAFF)
-            printf("Support VA_ENC_INTERLACED_PAFF\n");
-        
         config_attrib[config_attrib_num].type = VAConfigAttribEncInterlaced;
         config_attrib[config_attrib_num].value = VA_ENC_PACKED_HEADER_NONE;
         config_attrib_num++;
@@ -1101,28 +1071,6 @@ static int init_va(void)
     
     if (attrib[VAConfigAttribEncMaxRefFrames].value != VA_ATTRIB_NOT_SUPPORTED) {
         h264_maxref = attrib[VAConfigAttribEncMaxRefFrames].value;
-        
-        printf("Support %d RefPicList0 and %d RefPicList1\n",
-               h264_maxref & 0xffff, (h264_maxref >> 16) & 0xffff );
-    }
-
-    if (attrib[VAConfigAttribEncMaxSlices].value != VA_ATTRIB_NOT_SUPPORTED)
-        printf("Support %d slices\n", attrib[VAConfigAttribEncMaxSlices].value);
-
-    if (attrib[VAConfigAttribEncSliceStructure].value != VA_ATTRIB_NOT_SUPPORTED) {
-        int tmp = attrib[VAConfigAttribEncSliceStructure].value;
-        
-        printf("Support VAConfigAttribEncSliceStructure\n");
-
-        if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS\n");
-        if (tmp & VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS\n");
-        if (tmp & VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS)
-            printf("Support VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS\n");
-    }
-    if (attrib[VAConfigAttribEncMacroblockInfo].value != VA_ATTRIB_NOT_SUPPORTED) {
-        printf("Support VAConfigAttribEncMacroblockInfo\n");
     }
 
     free(entrypoints);
@@ -1370,8 +1318,7 @@ static int render_sequence(void)
     render_id[0] = seq_param_buf;
     render_id[1] = rc_param_buf;
     
-    va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");;
+    render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
 
     if (misc_priv_type != 0) {
         va_status = vaCreateBuffer(va_dpy, context_id,
@@ -1384,7 +1331,7 @@ static int render_sequence(void)
         misc_param_tmp->data[0] = misc_priv_value;
         vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
     
-        va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
+        render_picture_and_delete(va_dpy, context_id, &misc_param_tmpbuf, 1);
     }
     
     return 0;
@@ -1463,10 +1410,9 @@ static int render_picture(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
                                sizeof(pic_param), 1, &pic_param, &pic_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-    va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
 
     return 0;
 }
@@ -1501,8 +1447,7 @@ static int render_packedsequence(void)
 
     render_id[0] = packedseq_para_bufid;
     render_id[1] = packedseq_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedseq_buffer);
     
@@ -1539,8 +1484,7 @@ static int render_packedpicture(void)
 
     render_id[0] = packedpic_para_bufid;
     render_id[1] = packedpic_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedpic_buffer);
     
@@ -1576,8 +1520,7 @@ static void render_packedslice()
 
     render_id[0] = packedslice_para_bufid;
     render_id[1] = packedslice_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedslice_buffer);
 }
@@ -1634,17 +1577,16 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
                                sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
 
-    va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
-    
     return 0;
 }
 
 
 
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
 {    
     VACodedBufferSegment *buf_list = NULL;
     VAStatus va_status;
@@ -1652,7 +1594,7 @@ int H264Encoder::save_codeddata(storage_task task)
 
     string data;
 
-    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate);  // So we never get negative dts.
+    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
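+    // (With the default ip_period of 3, this is two frame periods at the
+    // maximum frame rate, i.e. the largest lag a B-frame chain can introduce
+    // between dts and pts; shifting everything by it keeps dts >= 0.)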
 
     va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
     CHECK_VASTATUS(va_status, "vaMapBuffer");
@@ -1681,13 +1623,13 @@ int H264Encoder::save_codeddata(storage_task task)
         httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
     }
     // Encode and add all audio frames up to and including the pts of this video frame.
-    // (They can never be queued to us after the video frame they belong to, only before.)
     for ( ;; ) {
         int64_t audio_pts;
-        std::vector<float> audio;
+        vector<float> audio;
         {
              unique_lock<mutex> lock(frame_queue_mutex);
-             if (pending_audio_frames.empty()) break;
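+             // Audio frames can now arrive after the video frame they belong
+             // to, so wait for more audio (or for shutdown) instead of just
+             // breaking out when the queue is empty.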
+             frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+             if (copy_thread_should_quit) return;
              auto it = pending_audio_frames.begin();
              if (it->first > task.pts) break;
              audio_pts = it->first;
@@ -1697,14 +1639,23 @@ int H264Encoder::save_codeddata(storage_task task)
 
         AVFrame *frame = avcodec_alloc_frame();
         frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_FLT;
+        frame->format = AV_SAMPLE_FMT_S32;
         frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
-        unique_ptr<float[]> planar_samples(new float[audio.size()]);
-        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-        for (int i = 0; i < frame->nb_samples; ++i) {
-            planar_samples[i] = audio[i * 2 + 0];
-            planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+        int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+        if (ret < 0) {
+            fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+            exit(1);
+        }
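+        // Convert the interleaved float samples to interleaved 32-bit signed
+        // integers, clamping symmetrically at +/-(2^31 - 1) so that values at
+        // or beyond full scale are pinned instead of overflowing int32_t.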
+        for (int i = 0; i < frame->nb_samples * 2; ++i) {
+            if (audio[i] >= 1.0f) {
+                int_samples[i] = 2147483647;
+            } else if (audio[i] <= -1.0f) {
+                int_samples[i] = -2147483647;
+            } else {
+                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+            }
         }
 
         AVPacket pkt;
@@ -1719,6 +1670,8 @@ int H264Encoder::save_codeddata(storage_task task)
         }
         // TODO: Delayed frames.
         avcodec_free_frame(&frame);
+        av_free_packet(&pkt);
+        if (audio_pts == task.pts) break;
     }
 
 #if 0
@@ -1740,15 +1693,13 @@ int H264Encoder::save_codeddata(storage_task task)
     printf("%08lld", encode_order);
     printf("(%06d bytes coded)", coded_size);
 #endif
-
-    return 0;
 }
 
 
 // This is weird, but it seems to put a new frame onto the queue.
 void H264Encoder::storage_task_enqueue(storage_task task)
 {
-       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+       unique_lock<mutex> lock(storage_task_queue_mutex);
        storage_task_queue.push(move(task));
        srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
        storage_task_queue_changed.notify_all();
@@ -1760,7 +1711,7 @@ void H264Encoder::storage_task_thread()
                storage_task current;
                {
                        // wait until there's an encoded frame  
-                       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+                       unique_lock<mutex> lock(storage_task_queue_mutex);
                        storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || !storage_task_queue.empty(); });
                        if (storage_thread_should_quit) return;
                        current = move(storage_task_queue.front());
@@ -1775,7 +1726,7 @@ void H264Encoder::storage_task_thread()
                save_codeddata(move(current));
 
                {
-                       std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+                       unique_lock<mutex> lock(storage_task_queue_mutex);
                        srcsurface_status[current.display_order % SURFACE_NUM] = SRC_SURFACE_FREE;
                        storage_task_queue_changed.notify_all();
                }
@@ -1808,35 +1759,14 @@ static int deinit_va()
 }
 
 
-static int print_input()
-{
-    printf("\n\nINPUT:Try to encode H264...\n");
-    if (rc_mode != -1)
-        printf("INPUT: RateControl  : %s\n", rc_to_string(rc_mode));
-    printf("INPUT: Resolution   : %dx%dframes\n", frame_width, frame_height);
-    printf("INPUT: FrameRate    : %d\n", frame_rate);
-    printf("INPUT: Bitrate      : %d\n", frame_bitrate);
-    printf("INPUT: Slieces      : %d\n", frame_slices);
-    printf("INPUT: IntraPeriod  : %d\n", intra_period);
-    printf("INPUT: IDRPeriod    : %d\n", intra_idr_period);
-    printf("INPUT: IpPeriod     : %d\n", ip_period);
-    printf("INPUT: Initial QP   : %d\n", initial_qp);
-    printf("INPUT: Min QP       : %d\n", minimal_qp);
-    printf("INPUT: Coded Clip   : %s\n", coded_fn);
-    
-    printf("\n\n"); /* return back to startpoint */
-    
-    return 0;
-}
-
 H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
        : current_storage_frame(0), surface(surface), httpd(httpd)
 {
-       AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
+       AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
        context_audio = avcodec_alloc_context3(codec_audio);
-       context_audio->bit_rate = 256000;
-       context_audio->sample_rate = 48000;
-       context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+       context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+       context_audio->sample_rate = OUTPUT_FREQUENCY;
+       context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
        context_audio->channels = 2;
        context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
        context_audio->time_base = AVRational{1, TIMEBASE};
@@ -1852,7 +1782,7 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
         frame_bitrate = 15000000;  // / 60;
        current_frame_encoding = 0;
 
-       print_input();
+       //print_input();
 
        init_va();
        setup_encode();
@@ -1864,9 +1794,9 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
        memset(&pic_param, 0, sizeof(pic_param));
        memset(&slice_param, 0, sizeof(slice_param));
 
-       storage_thread = std::thread(&H264Encoder::storage_task_thread, this);
+       storage_thread = thread(&H264Encoder::storage_task_thread, this);
 
-       copy_thread = std::thread([this]{
+       copy_thread = thread([this]{
                //SDL_GL_MakeCurrent(window, context);
                QOpenGLContext *context = create_context(this->surface);
                eglBindAPI(EGL_OPENGL_API);
@@ -1889,7 +1819,7 @@ H264Encoder::~H264Encoder()
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                copy_thread_should_quit = true;
-               frame_queue_nonempty.notify_one();
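+               // notify_all(), since both the copy thread and save_codeddata()
+               // can now be waiting on frame_queue_nonempty.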
+               frame_queue_nonempty.notify_all();
        }
        storage_thread.join();
        copy_thread.join();
@@ -1902,7 +1832,7 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 {
        {
                // Wait until this frame slot is done encoding.
-               std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
+               unique_lock<mutex> lock(storage_task_queue_mutex);
                storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
                if (storage_thread_should_quit) return false;
        }
@@ -1962,24 +1892,23 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        return true;
 }
 
-void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+void H264Encoder::add_audio(int64_t pts, vector<float> audio)
 {
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_audio_frames[pts] = move(audio);
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
-
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
                ++current_storage_frame;
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
 void H264Encoder::copy_thread_func()
@@ -1999,69 +1928,76 @@ void H264Encoder::copy_thread_func()
                {
                        unique_lock<mutex> lock(frame_queue_mutex);
                        frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_video_frames.count(current_frame_display) != 0; });
-                       if (copy_thread_should_quit) return;
-                       frame = move(pending_video_frames[current_frame_display]);
-                       pending_video_frames.erase(current_frame_display);
-               }
-
-               // Wait for the GPU to be done with the frame.
-               glClientWaitSync(frame.fence.get(), 0, 0);
-
-               // Release back any input frames we needed to render this frame.
-               frame.input_frames.clear();
-
-               // Unmap the image.
-               GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
-               eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
-               eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
-               VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
-               CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
-               va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
-               CHECK_VASTATUS(va_status, "vaDestroyImage");
-
-               VASurfaceID surface = surf->src_surface;
-
-               // Schedule the frame for encoding.
-               va_status = vaBeginPicture(va_dpy, context_id, surface);
-               CHECK_VASTATUS(va_status, "vaBeginPicture");
-
-               if (current_frame_type == FRAME_IDR) {
-                       render_sequence();
-                       render_picture();            
-                       if (h264_packedheader) {
-                               render_packedsequence();
-                               render_packedpicture();
+                       if (copy_thread_should_quit) {
+                               return;
+                       } else {
+                               frame = move(pending_video_frames[current_frame_display]);
+                               pending_video_frames.erase(current_frame_display);
                        }
-               } else {
-                       //render_sequence();
-                       render_picture();
                }
-               render_slice();
-               
-               va_status = vaEndPicture(va_dpy, context_id);
-               CHECK_VASTATUS(va_status, "vaEndPicture");
 
-               // Determine the pts and dts of this frame.
-               int64_t pts = frame.pts;
+               // Determine the dts of this frame.
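+               // (Example: an IDR frame gets pts_lag == TIMEBASE / MAX_FPS in
+               // encoding2display_order(), so its dts lands exactly one
+               // maximum-rate frame period before its pts.)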
                int64_t dts;
                if (pts_lag == -1) {
                        assert(last_dts != -1);
-                       dts = last_dts + (TIMEBASE / frame_rate);
+                       dts = last_dts + (TIMEBASE / MAX_FPS);
                } else {
-                       dts = pts - pts_lag;
+                       dts = frame.pts - pts_lag;
                }
                last_dts = dts;
 
-               // so now the data is done encoding (well, async job kicked off)...
-               // we send that to the storage thread
-               storage_task tmp;
-               tmp.display_order = current_frame_display;
-               tmp.frame_type = current_frame_type;
-               tmp.pts = pts;
-               tmp.dts = dts;
-               storage_task_enqueue(move(tmp));
-               
-               update_ReferenceFrames();
+               encode_frame(frame, frame.pts, dts);
                ++current_frame_encoding;
        }
 }
+
+void H264Encoder::encode_frame(H264Encoder::PendingFrame frame, int64_t pts, int64_t dts)
+{
+       // Wait for the GPU to be done with the frame.
+       glClientWaitSync(frame.fence.get(), 0, 0);
+
+       // Release back any input frames we needed to render this frame.
+       frame.input_frames.clear();
+
+       // Unmap the image.
+       GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
+       eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
+       eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
+       VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
+       CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+       va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
+       CHECK_VASTATUS(va_status, "vaDestroyImage");
+
+       VASurfaceID surface = surf->src_surface;
+
+       // Schedule the frame for encoding.
+       va_status = vaBeginPicture(va_dpy, context_id, surface);
+       CHECK_VASTATUS(va_status, "vaBeginPicture");
+
+       if (current_frame_type == FRAME_IDR) {
+               render_sequence();
+               render_picture();
+               if (h264_packedheader) {
+                       render_packedsequence();
+                       render_packedpicture();
+               }
+       } else {
+               //render_sequence();
+               render_picture();
+       }
+       render_slice();
+
+       va_status = vaEndPicture(va_dpy, context_id);
+       CHECK_VASTATUS(va_status, "vaEndPicture");
+
+       // so now the data is done encoding (well, async job kicked off)...
+       // we send that to the storage thread
+       storage_task tmp;
+       tmp.display_order = current_frame_display;
+       tmp.frame_type = current_frame_type;
+       tmp.pts = pts;
+       tmp.dts = dts;
+       storage_task_enqueue(move(tmp));
+
+       update_ReferenceFrames();
+}