Do some ALSA tweaks that will hopefully get rid of the underrun loops.
diff --git a/h264encode.cpp b/h264encode.cpp
index eb6faf3c4ec20a4c81fdde303c75b48dfc068761..d20683518784fd51c1a15d8089b46e26d7981678 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -7,11 +7,11 @@
 #include <assert.h>
 #include <epoxy/egl.h>
 #include <libavcodec/avcodec.h>
-#include <libavformat/avio.h>
-#include <libavutil/mathematics.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/frame.h>
 #include <libavutil/rational.h>
+#include <libavutil/samplefmt.h>
 #include <libdrm/drm_fourcc.h>
-#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <va/va_enc_h264.h>
 #include <va/va_x11.h>
 #include <condition_variable>
+#include <cstdint>
+#include <memory>
 #include <mutex>
 #include <queue>
 #include <string>
 #include <thread>
+#include <utility>
 
 #include "context.h"
+#include "defs.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -110,7 +114,7 @@ static  int frame_width = 176;
 static  int frame_height = 144;
 static  int frame_width_mbaligned;
 static  int frame_height_mbaligned;
-static  int frame_rate = 60;
+static  int frame_rate = FPS;
 static  unsigned int frame_bitrate = 0;
 static  unsigned int frame_slices = 1;
 static  double frame_size = 0;
@@ -118,7 +122,7 @@ static  int initial_qp = 15;
 //static  int initial_qp = 28;
 static  int minimal_qp = 0;
 static  int intra_period = 30;
-static  int intra_idr_period = 60;
+static  int intra_idr_period = FPS;
 static  int ip_period = 3;
 static  int rc_mode = -1;
 static  int rc_default_modes[] = {
@@ -151,6 +155,20 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// vaRenderPicture() is supposed to destroy the buffer implicitly, but if we
+// don't delete it here, we get leaks. The GStreamer implementation does the
+// same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+    VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+    for (int i = 0; i < num_buffers; ++i) {
+        va_status = vaDestroyBuffer(dpy, buffers[i]);
+        CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+    }
+}
+
 static unsigned int 
 va_swap32(unsigned int val)
 {
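Every vaCreateBuffer()/vaRenderPicture() pair in the hunks below now funnels through this helper, so a typical call site shrinks to the following shape (a sketch using names already present in this file):

    VABufferID buf;
    VAStatus va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
                                        sizeof(pic_param), 1, &pic_param, &buf);
    CHECK_VASTATUS(va_status, "vaCreateBuffer");
    render_picture_and_delete(va_dpy, context_id, &buf, 1);  // buf is destroyed here; do not reuse it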
@@ -343,12 +361,12 @@ static void sps_rbsp(bitstream *bs)
         bitstream_put_ui(bs, 1, 1); /* video_signal_type_present_flag */
         {
             bitstream_put_ui(bs, 5, 3);  /* video_format (5 = Unspecified) */
-            bitstream_put_ui(bs, 1, 1);  /* video_full_range_flag */
+            bitstream_put_ui(bs, 0, 1);  /* video_full_range_flag */
             bitstream_put_ui(bs, 1, 1);  /* colour_description_present_flag */
             {
                 bitstream_put_ui(bs, 1, 8);  /* colour_primaries (1 = BT.709) */
-                bitstream_put_ui(bs, 1, 8);  /* transfer_characteristics (1 = BT.709) */
-                bitstream_put_ui(bs, 1, 8);  /* matrix_coefficients (1 = BT.709) */
+                bitstream_put_ui(bs, 2, 8);  /* transfer_characteristics (2 = unspecified, since we use sRGB) */
+                bitstream_put_ui(bs, 6, 8);  /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
             }
         }
         bitstream_put_ui(bs, 0, 1); /* chroma_loc_info_present_flag */
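For orientation, the rewritten VUI block corresponds roughly to these libavutil color properties (a sketch only; this file writes the bits directly and never touches these fields):

    // Equivalent signaling in FFmpeg terms, enums from <libavutil/pixfmt.h>,
    // assuming an AVFrame *frame:
    frame->color_primaries = AVCOL_PRI_BT709;        // colour_primaries = 1
    frame->color_trc       = AVCOL_TRC_UNSPECIFIED;  // transfer_characteristics = 2
    frame->colorspace      = AVCOL_SPC_SMPTE170M;    // matrix_coefficients = 6 (BT.601)
    frame->color_range     = AVCOL_RANGE_MPEG;       // video_full_range_flag = 0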
@@ -1367,8 +1385,7 @@ static int render_sequence(void)
     render_id[0] = seq_param_buf;
     render_id[1] = rc_param_buf;
     
-    va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");;
+    render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
 
     if (misc_priv_type != 0) {
         va_status = vaCreateBuffer(va_dpy, context_id,
@@ -1381,7 +1398,7 @@ static int render_sequence(void)
         misc_param_tmp->data[0] = misc_priv_value;
         vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
     
-        va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
+        render_picture_and_delete(va_dpy, context_id, &misc_param_tmpbuf, 1);
     }
     
     return 0;
@@ -1460,10 +1477,9 @@ static int render_picture(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
                                sizeof(pic_param), 1, &pic_param, &pic_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-    va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
 
     return 0;
 }
@@ -1498,8 +1514,7 @@ static int render_packedsequence(void)
 
     render_id[0] = packedseq_para_bufid;
     render_id[1] = packedseq_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedseq_buffer);
     
@@ -1536,8 +1551,7 @@ static int render_packedpicture(void)
 
     render_id[0] = packedpic_para_bufid;
     render_id[1] = packedpic_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedpic_buffer);
     
@@ -1573,8 +1587,7 @@ static void render_packedslice()
 
     render_id[0] = packedslice_para_bufid;
     render_id[1] = packedslice_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedslice_buffer);
 }
@@ -1631,17 +1644,16 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
                                sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
 
-    va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
-    
     return 0;
 }
 
 
 
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
 {    
     VACodedBufferSegment *buf_list = NULL;
     VAStatus va_status;
@@ -1678,13 +1690,13 @@ int H264Encoder::save_codeddata(storage_task task)
         httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
     }
     // Encode and add all audio frames up to and including the pts of this video frame.
-    // (They can never be queued to us after the video frame they belong to, only before.)
     for ( ;; ) {
         int64_t audio_pts;
         std::vector<float> audio;
         {
              unique_lock<mutex> lock(frame_queue_mutex);
-             if (pending_audio_frames.empty()) break;
+             frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+             if (copy_thread_should_quit) return;
              auto it = pending_audio_frames.begin();
              if (it->first > task.pts) break;
              audio_pts = it->first;
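Together with the "audio_pts == task.pts" break added further down, this changes the loop's contract: it now blocks until every audio frame up to and including this video frame's pts has arrived, instead of only draining whatever happened to be queued already (hence the removal of the "can never be queued to us after" comment above). In outline (a control-flow sketch, not code from this file):

    // for (;;):
    //   wait until we are quitting, or some audio is pending
    //   if quitting: return
    //   if the oldest pending audio is newer than this video frame: break
    //   pop it, encode it, mux it
    //   if its pts equals the video frame's pts: break (all audio delivered)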
@@ -1694,14 +1706,19 @@ int H264Encoder::save_codeddata(storage_task task)
 
         AVFrame *frame = avcodec_alloc_frame();
         frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_FLT;
+        frame->format = AV_SAMPLE_FMT_S32;
         frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
-        unique_ptr<float[]> planar_samples(new float[audio.size()]);
-        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-        for (int i = 0; i < frame->nb_samples; ++i) {
-            planar_samples[i] = audio[i * 2 + 0];
-            planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 0);
+        for (int i = 0; i < frame->nb_samples * 2; ++i) {
+            if (audio[i] >= 1.0f) {
+                int_samples[i] = 2147483647;
+            } else if (audio[i] <= -1.0f) {
+                int_samples[i] = -2147483647;
+            } else {
+                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+            }
         }
 
         AVPacket pkt;
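The conversion loop above clamps to [-1, 1] and scales to a symmetric 32-bit range. The same logic, factored out as a standalone sketch (not code from this file):

    #include <math.h>    // lrintf
    #include <stdint.h>  // int32_t

    // Float sample in [-1, 1] -> signed 32-bit PCM with symmetric clipping.
    // The negative clip is -2147483647 rather than INT32_MIN, so that -1.0f
    // and +1.0f map to values of equal magnitude.
    static inline int32_t float_to_s32(float f)
    {
        if (f >= 1.0f) return 2147483647;
        if (f <= -1.0f) return -2147483647;
        return lrintf(f * 2147483647.0f);
    }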
@@ -1716,6 +1733,8 @@ int H264Encoder::save_codeddata(storage_task task)
         }
         // TODO: Delayed frames.
         avcodec_free_frame(&frame);
+        av_free_packet(&pkt);
+        if (audio_pts == task.pts) break;
     }
 
 #if 0
@@ -1737,8 +1756,6 @@ int H264Encoder::save_codeddata(storage_task task)
     printf("%08lld", encode_order);
     printf("(%06d bytes coded)", coded_size);
 #endif
-
-    return 0;
 }
 
 
@@ -1829,11 +1846,11 @@ static int print_input()
 H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
        : current_storage_frame(0), surface(surface), httpd(httpd)
 {
-       AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
+       AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
        context_audio = avcodec_alloc_context3(codec_audio);
-       context_audio->bit_rate = 256000;
-       context_audio->sample_rate = 48000;
-       context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+       context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+       context_audio->sample_rate = OUTPUT_FREQUENCY;
+       context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
        context_audio->channels = 2;
        context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
        context_audio->time_base = AVRational{1, TIMEBASE};
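The new constants come from defs.h (included at the top of this diff); their actual values are not shown here. A hypothetical set consistent with the literals they replace might look like this (AUDIO_OUTPUT_SAMPLE_FMT in particular is an assumption, chosen to match the AV_SAMPLE_FMT_S32 frames built in save_codeddata()):

    // Hypothetical defs.h sketch; the real values live in defs.h, not in this diff.
    #define OUTPUT_FREQUENCY        48000              // replaces the literal 48000
    #define AUDIO_OUTPUT_BIT_RATE   256000             // replaces the literal 256000
    #define AUDIO_OUTPUT_CODEC      AV_CODEC_ID_MP3    // replaces the literal AV_CODEC_ID_MP3
    #define AUDIO_OUTPUT_SAMPLE_FMT AV_SAMPLE_FMT_S32  // assumption, matching save_codeddata()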
@@ -1865,7 +1882,7 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
 
        copy_thread = std::thread([this]{
                //SDL_GL_MakeCurrent(window, context);
-               QOpenGLContext *context = create_context();
+               QOpenGLContext *context = create_context(this->surface);
                eglBindAPI(EGL_OPENGL_API);
                if (!make_current(context, this->surface)) {
                        printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
@@ -1886,7 +1903,7 @@ H264Encoder::~H264Encoder()
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                copy_thread_should_quit = true;
-               frame_queue_nonempty.notify_one();
+               frame_queue_nonempty.notify_all();
        }
        storage_thread.join();
        copy_thread.join();
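The notify_one() to notify_all() change matters because save_codeddata(), running on the storage thread, now also waits on frame_queue_nonempty: with two potential waiters, a single wake-up could be consumed by the wrong thread, leaving the other blocked forever while the destructor sits in join(). The idiom, in miniature (a sketch of the pattern, not code from this file):

    // Waiter side (both threads): the wait predicate re-checks the quit flag,
    // so a notify_all() issued after setting the flag always releases them.
    //   frame_queue_nonempty.wait(lock, [&]{ return copy_thread_should_quit || work_pending(); });
    //   if (copy_thread_should_quit) return;
    // Shutdown side (this destructor): set copy_thread_should_quit under the
    // lock, notify_all(), then join() each thread. (work_pending() here is a
    // stand-in for the real per-thread queue checks.)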
@@ -1965,10 +1982,9 @@ void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_audio_frames[pts] = move(audio);
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
-
 void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
 {
        {
@@ -1976,7 +1992,7 @@ void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vect
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
                ++current_storage_frame;
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
 void H264Encoder::copy_thread_func()