X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=h264encode.cpp;h=a16a5ea0b376dc4ed5678a48b7c4417ef433533e;hb=77b3efa1dc21f1fb1b64ba89f67910563804aa1b;hp=4b900bfdab3f468704d4a3b2689980b19a338748;hpb=08cf2c37c4178cf5dd029f960f1553ff111bb48e;p=nageru

diff --git a/h264encode.cpp b/h264encode.cpp
index 4b900bf..a16a5ea 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -7,11 +7,11 @@
 #include
 #include
 #include
-#include
-#include
+#include
+#include
 #include
+#include
 #include
-#include
 #include
 #include
 #include
@@ -20,12 +20,16 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
 
 #include "context.h"
+#include "defs.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -110,7 +114,7 @@ static int frame_width = 176;
 static int frame_height = 144;
 static int frame_width_mbaligned;
 static int frame_height_mbaligned;
-static int frame_rate = 60;
+static int frame_rate = FPS;
 static unsigned int frame_bitrate = 0;
 static unsigned int frame_slices = 1;
 static double frame_size = 0;
@@ -118,7 +122,7 @@ static int initial_qp = 15;
 //static int initial_qp = 28;
 static int minimal_qp = 0;
 static int intra_period = 30;
-static int intra_idr_period = 60;
+static int intra_idr_period = FPS;
 static int ip_period = 3;
 static int rc_mode = -1;
 static int rc_default_modes[] = {
@@ -347,7 +351,7 @@ static void sps_rbsp(bitstream *bs)
         bitstream_put_ui(bs, 1, 1); /* colour_description_present_flag */
         {
             bitstream_put_ui(bs, 1, 8);  /* colour_primaries (1 = BT.709) */
-            bitstream_put_ui(bs, 1, 8);  /* transfer_characteristics (1 = BT.709) */
+            bitstream_put_ui(bs, 2, 8);  /* transfer_characteristics (2 = unspecified, since we use sRGB) */
             bitstream_put_ui(bs, 6, 8);  /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
         }
     }
@@ -1465,6 +1469,12 @@ static int render_picture(void)
     va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
     CHECK_VASTATUS(va_status, "vaRenderPicture");
 
+    // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
+    // but if we don't delete it here, we get leaks. The GStreamer implementation
+    // does the same.
+    va_status = vaDestroyBuffer(va_dpy, pic_param_buf);
+    CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+
     return 0;
 }
 
@@ -1631,11 +1641,17 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
                                sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
     va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
    CHECK_VASTATUS(va_status, "vaRenderPicture");
-
+
+    // Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
+    // but if we don't delete it here, we get leaks. The GStreamer implementation
+    // does the same.
+    va_status = vaDestroyBuffer(va_dpy, slice_param_buf);
+    CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+
     return 0;
 }
 
@@ -1694,14 +1710,19 @@ int H264Encoder::save_codeddata(storage_task task)
 
     AVFrame *frame = avcodec_alloc_frame();
     frame->nb_samples = audio.size() / 2;
-    frame->format = AV_SAMPLE_FMT_FLT;
+    frame->format = AV_SAMPLE_FMT_S32;
     frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
-    unique_ptr<float[]> planar_samples(new float[audio.size()]);
-    avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-    for (int i = 0; i < frame->nb_samples; ++i) {
-        planar_samples[i] = audio[i * 2 + 0];
-        planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+    unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+    avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 0);
+    for (int i = 0; i < frame->nb_samples * 2; ++i) {
+        if (audio[i] >= 1.0f) {
+            int_samples[i] = 2147483647;
+        } else if (audio[i] <= -1.0f) {
+            int_samples[i] = -2147483647;
+        } else {
+            int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+        }
     }
 
     AVPacket pkt;
@@ -1716,6 +1737,7 @@ int H264Encoder::save_codeddata(storage_task task)
     }
     // TODO: Delayed frames.
     avcodec_free_frame(&frame);
+    av_free_packet(&pkt);
 }
 
 #if 0
@@ -1829,11 +1851,11 @@ static int print_input()
 H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
     : current_storage_frame(0), surface(surface), httpd(httpd)
 {
-    AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
+    AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
     context_audio = avcodec_alloc_context3(codec_audio);
-    context_audio->bit_rate = 256000;
-    context_audio->sample_rate = 48000;
-    context_audio->sample_fmt = AV_SAMPLE_FMT_FLTP;
+    context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+    context_audio->sample_rate = OUTPUT_FREQUENCY;
+    context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
     context_audio->channels = 2;
     context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
     context_audio->time_base = AVRational{1, TIMEBASE};
@@ -1865,7 +1887,7 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
 
     copy_thread = std::thread([this]{
         //SDL_GL_MakeCurrent(window, context);
-        QOpenGLContext *context = create_context();
+        QOpenGLContext *context = create_context(this->surface);
         eglBindAPI(EGL_OPENGL_API);
         if (!make_current(context, this->surface)) {
             printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
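
A note on the vaDestroyBuffer() calls added in render_picture() and render_slice(): the resulting pattern is simply create, render, then destroy for every parameter buffer. The sketch below restates that lifecycle outside the patch; render_param_buffer() is a hypothetical helper name, the CHECK_VASTATUS macro is replaced by plain status checks, and va_dpy/context_id are assumed to be an already-initialized VADisplay and encode context.

// Sketch (not from the patch) of the buffer lifecycle the change settles on:
// create the parameter buffer, queue it with vaRenderPicture(), then destroy it
// explicitly rather than relying on the driver to free it implicitly.
#include <va/va.h>
#include <cstdio>

bool render_param_buffer(VADisplay va_dpy, VAContextID context_id,
                         VABufferType type, void *data, unsigned int size)
{
    VABufferID buf;
    VAStatus va_status = vaCreateBuffer(va_dpy, context_id, type, size, 1, data, &buf);
    if (va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "vaCreateBuffer failed: %d\n", va_status);
        return false;
    }

    va_status = vaRenderPicture(va_dpy, context_id, &buf, 1);
    if (va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "vaRenderPicture failed: %d\n", va_status);
        vaDestroyBuffer(va_dpy, buf);
        return false;
    }

    // The buffer has already been queued at this point, so destroying it here
    // only releases our handle; this is what plugs the leak the patch describes.
    va_status = vaDestroyBuffer(va_dpy, buf);
    return va_status == VA_STATUS_SUCCESS;
}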
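
The save_codeddata() change switches the audio samples handed to the encoder from planar float to interleaved signed 32-bit PCM. Below is a standalone sketch of the same clamp-and-scale conversion; float_to_s32() is a hypothetical name, and the surrounding FFmpeg frame setup is omitted.

// Sketch (not from the patch) of converting interleaved float samples in [-1, 1]
// to signed 32-bit PCM, mirroring the loop added in save_codeddata().
#include <cmath>
#include <cstdint>
#include <vector>

std::vector<int32_t> float_to_s32(const std::vector<float> &audio)
{
    std::vector<int32_t> out(audio.size());
    for (size_t i = 0; i < audio.size(); ++i) {
        if (audio[i] >= 1.0f) {
            out[i] = 2147483647;                              // Clamp positive overshoot.
        } else if (audio[i] <= -1.0f) {
            out[i] = -2147483647;                             // Symmetric negative clamp.
        } else {
            out[i] = std::lrintf(audio[i] * 2147483647.0f);   // Round to nearest integer.
        }
    }
    return out;
}

Clamping before the multiply matters: mixed float audio can overshoot ±1.0 slightly, and converting the scaled product to int32_t without the clamp would overflow. Using -2147483647 rather than INT32_MIN keeps the scale symmetric, as in the patch.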