Fix a leak in RefCountedFrame.

diff --git a/h264encode.cpp b/h264encode.cpp
index ad688fc58ee87b59e5232b94ad75bc4c24b36ad1..3d8b463173aab3b5d2f3486d6b02688dc2db8f57 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -29,6 +29,7 @@
 #include <utility>
 
 #include "context.h"
+#include "defs.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -113,7 +114,7 @@ static  int frame_width = 176;
 static  int frame_height = 144;
 static  int frame_width_mbaligned;
 static  int frame_height_mbaligned;
-static  int frame_rate = 60;
+static  int frame_rate = FPS;
 static  unsigned int frame_bitrate = 0;
 static  unsigned int frame_slices = 1;
 static  double frame_size = 0;
@@ -121,7 +122,7 @@ static  int initial_qp = 15;
 //static  int initial_qp = 28;
 static  int minimal_qp = 0;
 static  int intra_period = 30;
-static  int intra_idr_period = 60;
+static  int intra_idr_period = FPS;
 static  int ip_period = 3;
 static  int rc_mode = -1;
 static  int rc_default_modes[] = {
@@ -154,6 +155,20 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// vaRenderPicture() is supposedly meant to destroy the buffers implicitly,
+// but if we don't delete them here, we get leaks. The GStreamer implementation
+// does the same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+    VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+    for (int i = 0; i < num_buffers; ++i) {
+        va_status = vaDestroyBuffer(dpy, buffers[i]);
+        CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+    }
+}
+
 static unsigned int 
 va_swap32(unsigned int val)
 {
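
For reference, the new render_picture_and_delete() helper reports errors through the same CHECK_VASTATUS macro used throughout this file. That macro is defined elsewhere in the file and is not shown in this diff; it presumably looks roughly like the sketch below (an assumption for illustration, not a verbatim copy from h264encode.cpp):

    // Assumed sketch of CHECK_VASTATUS (not part of this diff): abort loudly
    // if a VA-API call did not return VA_STATUS_SUCCESS.
    #include <cstdio>
    #include <cstdlib>
    #include <va/va.h>

    #define CHECK_VASTATUS(va_status, func)                                   \
        do {                                                                  \
            if ((va_status) != VA_STATUS_SUCCESS) {                           \
                fprintf(stderr, "%s:%d: %s failed with status 0x%x\n",        \
                        __func__, __LINE__, func, va_status);                 \
                exit(1);                                                      \
            }                                                                 \
        } while (0)
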
@@ -1370,8 +1385,7 @@ static int render_sequence(void)
     render_id[0] = seq_param_buf;
     render_id[1] = rc_param_buf;
     
-    va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");;
+    render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
 
     if (misc_priv_type != 0) {
         va_status = vaCreateBuffer(va_dpy, context_id,
@@ -1384,7 +1398,7 @@ static int render_sequence(void)
         misc_param_tmp->data[0] = misc_priv_value;
         vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
     
-        va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
+        render_picture_and_delete(va_dpy, context_id, &misc_param_tmpbuf, 1);
     }
     
     return 0;
@@ -1463,10 +1477,9 @@ static int render_picture(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
                                sizeof(pic_param), 1, &pic_param, &pic_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-    va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
 
     return 0;
 }
@@ -1501,8 +1514,7 @@ static int render_packedsequence(void)
 
     render_id[0] = packedseq_para_bufid;
     render_id[1] = packedseq_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedseq_buffer);
     
@@ -1539,8 +1551,7 @@ static int render_packedpicture(void)
 
     render_id[0] = packedpic_para_bufid;
     render_id[1] = packedpic_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedpic_buffer);
     
@@ -1576,8 +1587,7 @@ static void render_packedslice()
 
     render_id[0] = packedslice_para_bufid;
     render_id[1] = packedslice_data_bufid;
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedslice_buffer);
 }
@@ -1634,11 +1644,10 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
                                sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
 
-    va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
-    
     return 0;
 }
 
@@ -1697,14 +1706,19 @@ int H264Encoder::save_codeddata(storage_task task)
 
         AVFrame *frame = avcodec_alloc_frame();
         frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_FLT;
+        frame->format = AV_SAMPLE_FMT_S32;
         frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
-        unique_ptr<float[]> planar_samples(new float[audio.size()]);
-        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-        for (int i = 0; i < frame->nb_samples; ++i) {
-            planar_samples[i] = audio[i * 2 + 0];
-            planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+        unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+        avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 0);
+        for (int i = 0; i < frame->nb_samples * 2; ++i) {
+            if (audio[i] >= 1.0f) {
+                int_samples[i] = 2147483647;
+            } else if (audio[i] <= -1.0f) {
+                int_samples[i] = -2147483647;
+            } else {
+                int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+            }
         }
 
         AVPacket pkt;
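
The conversion loop above clamps each float sample to [-1.0, 1.0] before scaling, so that out-of-range input cannot overflow the 32-bit integer target. A minimal standalone sketch of the same clamp-and-scale technique (the helper name float_to_s32 is hypothetical, not taken from this file):

    // Hypothetical helper mirroring the conversion above: interleaved stereo
    // floats in [-1.0, 1.0] become interleaved signed 32-bit PCM samples.
    #include <cmath>
    #include <cstdint>
    #include <vector>

    static std::vector<int32_t> float_to_s32(const std::vector<float> &in)
    {
        std::vector<int32_t> out(in.size());
        for (size_t i = 0; i < in.size(); ++i) {
            if (in[i] >= 1.0f) {
                out[i] = 2147483647;          // clamp at positive full scale
            } else if (in[i] <= -1.0f) {
                out[i] = -2147483647;         // clamp at negative full scale
            } else {
                out[i] = std::lrintf(in[i] * 2147483647.0f);
            }
        }
        return out;
    }
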
@@ -1719,6 +1733,7 @@ int H264Encoder::save_codeddata(storage_task task)
         }
         // TODO: Delayed frames.
         avcodec_free_frame(&frame);
+        av_free_packet(&pkt);
     }
 
 #if 0
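
The added av_free_packet() call is the leak fix in this hunk: with the old FFmpeg encode API used here, avcodec_encode_audio2() allocates pkt.data whenever it produces output, and nothing else releases that buffer. A rough, assumed sketch of the pattern (the function name encode_and_free and the muxer comment are illustrative, not from this file):

    // Assumed sketch of the old-API encode/free pattern the hunk above fixes.
    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    static void encode_and_free(AVCodecContext *ctx, AVFrame *frame)
    {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = nullptr;   // let the encoder allocate the output buffer
        pkt.size = 0;
        int got_output = 0;
        if (avcodec_encode_audio2(ctx, &pkt, frame, &got_output) == 0 && got_output) {
            // ... hand pkt.data / pkt.size to the muxer here ...
        }
        av_free_packet(&pkt);  // without this, the encoder-allocated buffer leaks
    }
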
@@ -1832,11 +1847,11 @@ static int print_input()
 H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
        : current_storage_frame(0), surface(surface), httpd(httpd)
 {
-       AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
+       AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
        context_audio = avcodec_alloc_context3(codec_audio);
-       context_audio->bit_rate = 256000;
-       context_audio->sample_rate = 48000;
-       context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+       context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+       context_audio->sample_rate = OUTPUT_FREQUENCY;
+       context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
        context_audio->channels = 2;
        context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
        context_audio->time_base = AVRational{1, TIMEBASE};
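
Several of the replacements in this diff (FPS, AUDIO_OUTPUT_BIT_RATE, OUTPUT_FREQUENCY, AUDIO_OUTPUT_SAMPLE_FMT, AUDIO_OUTPUT_CODEC) come from the newly included defs.h, which is not shown here. Inferring only from the literals they replace, that header presumably contains something along these lines; the value of AUDIO_OUTPUT_CODEC cannot be determined from the diff and is therefore omitted:

    // Assumed contents of defs.h, reconstructed from the replaced literals in
    // this diff only; the real header may differ and define more constants.
    #define FPS 60
    #define OUTPUT_FREQUENCY 48000            // audio sample rate, in Hz
    #define AUDIO_OUTPUT_BIT_RATE 256000      // audio bit rate, in bits/sec
    #define AUDIO_OUTPUT_SAMPLE_FMT AV_SAMPLE_FMT_S32
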