X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=h264encode.cpp;h=0946eb02b06126f0fcfc6c5a197e4d933daef814;hb=9c8a63d449829cbaa449cb177cfb01c7a1d2f358;hp=5fa01bd48d29ab4a2286ed88117544989961a9ef;hpb=7a6878b637f9b323a83411d155437752cd83eff0;p=nageru

diff --git a/h264encode.cpp b/h264encode.cpp
index 5fa01bd..0946eb0 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -7,11 +7,11 @@
 #include 
 #include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
+#include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -20,12 +20,17 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include "context.h"
+#include "defs.h"
+#include "httpd.h"
 #include "timebase.h"
 
 class QOpenGLContext;
@@ -109,7 +114,6 @@ static int frame_width = 176;
 static int frame_height = 144;
 static int frame_width_mbaligned;
 static int frame_height_mbaligned;
-static int frame_rate = 60;
 static unsigned int frame_bitrate = 0;
 static unsigned int frame_slices = 1;
 static double frame_size = 0;
@@ -117,7 +121,7 @@ static int initial_qp = 15;
 //static int initial_qp = 28;
 static int minimal_qp = 0;
 static int intra_period = 30;
-static int intra_idr_period = 60;
+static int intra_idr_period = MAX_FPS;  // About a second; more at lower frame rates. Not ideal.
 static int ip_period = 3;
 static int rc_mode = -1;
 static int rc_default_modes[] = {
@@ -150,6 +154,20 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// vaRenderPicture() is supposedly meant to destroy the buffer implicitly,
+// but if we don't delete it here, we get leaks. The GStreamer implementation
+// does the same.
+static void render_picture_and_delete(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers)
+{
+	VAStatus va_status = vaRenderPicture(dpy, context, buffers, num_buffers);
+	CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+	for (int i = 0; i < num_buffers; ++i) {
+		va_status = vaDestroyBuffer(dpy, buffers[i]);
+		CHECK_VASTATUS(va_status, "vaDestroyBuffer");
+	}
+}
+
 static unsigned int 
 va_swap32(unsigned int val)
 {
@@ -339,7 +357,17 @@ static void sps_rbsp(bitstream *bs)
     bitstream_put_ui(bs, 1, 1); /* vui_parameters_present_flag */
     bitstream_put_ui(bs, 0, 1); /* aspect_ratio_info_present_flag */
     bitstream_put_ui(bs, 0, 1); /* overscan_info_present_flag */
-    bitstream_put_ui(bs, 0, 1); /* video_signal_type_present_flag */
+    bitstream_put_ui(bs, 1, 1); /* video_signal_type_present_flag */
+    {
+        bitstream_put_ui(bs, 5, 3);  /* video_format (5 = Unspecified) */
+        bitstream_put_ui(bs, 0, 1);  /* video_full_range_flag */
+        bitstream_put_ui(bs, 1, 1);  /* colour_description_present_flag */
+        {
+            bitstream_put_ui(bs, 1, 8);  /* colour_primaries (1 = BT.709) */
+            bitstream_put_ui(bs, 2, 8);  /* transfer_characteristics (2 = unspecified, since we use sRGB) */
+            bitstream_put_ui(bs, 6, 8);  /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
+        }
+    }
     bitstream_put_ui(bs, 0, 1); /* chroma_loc_info_present_flag */
     bitstream_put_ui(bs, 1, 1); /* timing_info_present_flag */
     {
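The new VUI block above tells decoders how to interpret the encoded YCbCr: BT.709 primaries, unspecified transfer characteristics, a BT.601 matrix, and studio-swing range. For reference, the same signaling expressed through FFmpeg's AVCodecContext fields would look roughly like this (illustrative only; this patch writes the SPS by hand and does not route video through libavcodec):

	// Rough libavcodec equivalent of the hand-written VUI fields above.
	// Enums come from <libavutil/pixfmt.h>; this helper is not part of the patch.
	static void set_color_description(AVCodecContext *ctx)
	{
		ctx->color_primaries = AVCOL_PRI_BT709;   // colour_primaries = 1
		ctx->color_trc = AVCOL_TRC_UNSPECIFIED;   // transfer_characteristics = 2
		ctx->colorspace = AVCOL_SPC_SMPTE170M;    // matrix_coefficients = 6 (BT.601)
		ctx->color_range = AVCOL_RANGE_MPEG;      // video_full_range_flag = 0
	}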
@@ -590,7 +618,7 @@ static int build_packed_slice_buffer(unsigned char **header_buffer)
 //
 // Getting pts and dts right with variable frame rate (VFR) and B-frames can be a
 // bit tricky. We assume first of all that the frame rate never goes _above_
-// , which gives us a frame period N. The decoder can always decode
+// MAX_FPS, which gives us a frame period N. The decoder can always decode
 // at this speed or faster, as long as dts <= pts (a frame is never
 // presented before it is decoded). Furthermore, we never have longer chains
 // of B-frames than a fixed constant C. (In a B-frame chain, we say that the base
@@ -683,7 +711,7 @@ void encoding2display_order(
 	*displaying_order = encoding_order;
 	// IDR frames are a special case; I honestly can't find the logic behind
 	// why this is the right thing, but it seems to line up nicely in practice :-)
-	*pts_lag = TIMEBASE / frame_rate;
+	*pts_lag = TIMEBASE / MAX_FPS;
     } else if (((encoding_order_gop - 1) % ip_period) != 0) { /* B frames */
 	*frame_type = FRAME_B;
 	*displaying_order = encoding_order - 1;
@@ -844,7 +872,7 @@ static int process_cmdline(int argc, char *argv[])
     }
 
     if (frame_bitrate == 0)
-	frame_bitrate = frame_width * frame_height * 12 * frame_rate / 50;
+	frame_bitrate = frame_width * frame_height * 12 * MAX_FPS / 50;
 
     if (coded_fn == NULL) {
 	struct stat buf;
@@ -1356,8 +1384,7 @@ static int render_sequence(void)
     render_id[0] = seq_param_buf;
     render_id[1] = rc_param_buf;
 
-    va_status = vaRenderPicture(va_dpy, context_id, &render_id[0], 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");;
+    render_picture_and_delete(va_dpy, context_id, &render_id[0], 2);
 
     if (misc_priv_type != 0) {
 	va_status = vaCreateBuffer(va_dpy, context_id,
@@ -1370,7 +1397,7 @@ static int render_sequence(void)
 	misc_param_tmp->data[0] = misc_priv_value;
 	vaUnmapBuffer(va_dpy, misc_param_tmpbuf);
 
-	va_status = vaRenderPicture(va_dpy, context_id, &misc_param_tmpbuf, 1);
+	render_picture_and_delete(va_dpy, context_id, &misc_param_tmpbuf, 1);
     }
 
     return 0;
@@ -1449,10 +1476,9 @@ static int render_picture(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
 			       sizeof(pic_param), 1, &pic_param, &pic_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
-    va_status = vaRenderPicture(va_dpy, context_id, &pic_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, &pic_param_buf, 1);
 
     return 0;
 }
@@ -1487,8 +1513,7 @@ static int render_packedsequence(void)
     render_id[0] = packedseq_para_bufid;
     render_id[1] = packedseq_data_bufid;
 
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedseq_buffer);
 
@@ -1525,8 +1550,7 @@ static int render_packedpicture(void)
     render_id[0] = packedpic_para_bufid;
     render_id[1] = packedpic_data_bufid;
 
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedpic_buffer);
 
@@ -1562,8 +1586,7 @@ static void render_packedslice()
     render_id[0] = packedslice_para_bufid;
     render_id[1] = packedslice_data_bufid;
 
-    va_status = vaRenderPicture(va_dpy, context_id, render_id, 2);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    render_picture_and_delete(va_dpy, context_id, render_id, 2);
 
     free(packedslice_buffer);
 }
@@ -1620,17 +1643,16 @@ static int render_slice(void)
 
     va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType,
 			       sizeof(slice_param), 1, &slice_param, &slice_param_buf);
-    CHECK_VASTATUS(va_status, "vaCreateBuffer");;
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    render_picture_and_delete(va_dpy, context_id, &slice_param_buf, 1);
 
-    va_status = vaRenderPicture(va_dpy, context_id, &slice_param_buf, 1);
-    CHECK_VASTATUS(va_status, "vaRenderPicture");
-    
     return 0;
 }
 
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
 {
 	VACodedBufferSegment *buf_list = NULL;
 	VAStatus va_status;
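To make the pts/dts scheme from the comment above concrete: with ip_period = 3 the encoder emits frames out of display order, and dts simply trails pts by the announced lag. A minimal sketch, assuming TIMEBASE = 90000 (timebase.h) and MAX_FPS = 60 (defs.h), so the frame period N = 1500:

	// Display order:   I0  B1  B2  P3  B4  B5  P6 ...
	// Encoding order:  I0  P3  B1  B2  P6  B4  B5 ...
	// P3 is decoded two frame periods before it is displayed, so its dts
	// must lie at least 2 * N before its pts; B-frames can use pts == dts.
	int64_t compute_dts(int64_t pts, int64_t pts_lag, int64_t last_dts)
	{
		const int64_t N = TIMEBASE / MAX_FPS;  // 1500 with the assumed constants
		if (pts_lag == -1)
			return last_dts + N;  // same fallback as in copy_thread_func() below
		return pts - pts_lag;         // the normal case
	}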
@@ -1638,7 +1660,7 @@ int H264Encoder::save_codeddata(storage_task task)
 
 	string data;
 
-	const int64_t global_delay = (ip_period - 1) * (TIMEBASE / frame_rate);	// So we never get negative dts.
+	const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS);	// So we never get negative dts.
 
 	va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
 	CHECK_VASTATUS(va_status, "vaMapBuffer");
@@ -1655,8 +1677,6 @@ int H264Encoder::save_codeddata(storage_task task)
 		AVPacket pkt;
 		memset(&pkt, 0, sizeof(pkt));
 		pkt.buf = nullptr;
-		pkt.pts = av_rescale_q(task.pts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
-		pkt.dts = av_rescale_q(task.dts + global_delay, AVRational{1, TIMEBASE}, avstream_video->time_base);
 		pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
 		pkt.size = data.size();
 		pkt.stream_index = 0;
@@ -1666,32 +1686,42 @@ int H264Encoder::save_codeddata(storage_task task)
 		pkt.flags = 0;
 	}
 	//pkt.duration = 1;
-	av_interleaved_write_frame(avctx, &pkt);
+	httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
 	}
 
 	// Encode and add all audio frames up to and including the pts of this video frame.
-	// (They can never be queued to us after the video frame they belong to, only before.)
 	for ( ;; ) {
 		int64_t audio_pts;
 		std::vector<float> audio;
 		{
 			unique_lock<mutex> lock(frame_queue_mutex);
-			if (pending_audio_frames.empty()) break;
+			frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+			if (copy_thread_should_quit) return;
 			auto it = pending_audio_frames.begin();
 			if (it->first > task.pts) break;
 			audio_pts = it->first;
 			audio = move(it->second);
 			pending_audio_frames.erase(it);
 		}
+
 		AVFrame *frame = avcodec_alloc_frame();
 		frame->nb_samples = audio.size() / 2;
-		frame->format = AV_SAMPLE_FMT_FLT;
+		frame->format = AV_SAMPLE_FMT_S32;
 		frame->channel_layout = AV_CH_LAYOUT_STEREO;
-		unique_ptr<float[]> planar_samples(new float[audio.size()]);
-		avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
-		for (int i = 0; i < frame->nb_samples; ++i) {
-			planar_samples[i] = audio[i * 2 + 0];
-			planar_samples[i + frame->nb_samples] = audio[i * 2 + 1];
+		unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
+		int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+		if (ret < 0) {
+			fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+			exit(1);
+		}
+		for (int i = 0; i < frame->nb_samples * 2; ++i) {
+			if (audio[i] >= 1.0f) {
+				int_samples[i] = 2147483647;
+			} else if (audio[i] <= -1.0f) {
+				int_samples[i] = -2147483647;
+			} else {
+				int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+			}
 		}
 
 		AVPacket pkt;
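The float-to-S32 conversion in the loop above, distilled into a standalone sketch for reference (this helper is not part of the patch): samples are clamped symmetrically to plus/minus 2147483647 rather than using INT32_MIN, and lrintf() rounds to nearest instead of truncating:

	static int32_t float_to_s32(float f)
	{
		if (f >= 1.0f)
			return 2147483647;   // clamp positive overshoot
		if (f <= -1.0f)
			return -2147483647;  // symmetric clamp; INT32_MIN is deliberately unused
		return lrintf(f * 2147483647.0f);  // round to nearest; needs <math.h>
	}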
@@ -1699,15 +1729,15 @@ int H264Encoder::save_codeddata(storage_task task)
 		pkt.data = nullptr;
 		pkt.size = 0;
 		int got_output;
-		avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
+		avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
 		if (got_output) {
-			pkt.pts = av_rescale_q(audio_pts + global_delay, AVRational{1, TIMEBASE}, avstream_audio->time_base);
-			pkt.dts = pkt.pts;
 			pkt.stream_index = 1;
-			av_interleaved_write_frame(avctx, &pkt);
+			httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
 		}
 		// TODO: Delayed frames.
 		avcodec_free_frame(&frame);
+		av_free_packet(&pkt);
+
 		if (audio_pts == task.pts) break;
 	}
 
 #if 0
@@ -1729,8 +1759,6 @@ int H264Encoder::save_codeddata(storage_task task)
 	printf("%08lld", encode_order);
 	printf("(%06d bytes coded)", coded_size);
 #endif
-
-	return 0;
 }
 
@@ -1803,7 +1831,6 @@ static int print_input()
 	if (rc_mode != -1)
 		printf("INPUT: RateControl : %s\n", rc_to_string(rc_mode));
 	printf("INPUT: Resolution : %dx%d\n", frame_width, frame_height);
-	printf("INPUT: FrameRate : %d\n", frame_rate);
 	printf("INPUT: Bitrate : %d\n", frame_bitrate);
 	printf("INPUT: Slices : %d\n", frame_slices);
 	printf("INPUT: IntraPeriod : %d\n", intra_period);
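Throughout save_codeddata() above, av_interleaved_write_frame() has been replaced by httpd->add_packet(): the encoder no longer muxes to a local file but hands each finished AVPacket to the embedded HTTP server, passing pts and dts alongside in TIMEBASE units. The constructor below drops the whole AVFormatContext setup accordingly. A hypothetical sketch of the sink's shape (httpd.h itself is not shown in this diff, so the exact declaration is an assumption):

	// Assumed interface of the new packet sink; the real declaration lives
	// in httpd.h. The HTTPD does the actual muxing towards connected clients.
	class HTTPD {
	public:
		void add_packet(const AVPacket &pkt, int64_t pts, int64_t dts);
	};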
@@ -1818,57 +1845,22 @@ static int print_input()
 
 	return 0;
 }
 
-
-//H264Encoder::H264Encoder(SDL_Window *window, SDL_GLContext context, int width, int height, const char *output_filename)
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *output_filename)
-	: current_storage_frame(0), surface(surface)
-	//: width(width), height(height), current_encoding_frame(0)
+H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
+	: current_storage_frame(0), surface(surface), httpd(httpd)
 {
-	av_register_all();
-	avctx = avformat_alloc_context();
-	avctx->oformat = av_guess_format(NULL, output_filename, NULL);
-	strcpy(avctx->filename, output_filename);
-	if (avio_open2(&avctx->pb, output_filename, AVIO_FLAG_WRITE, &avctx->interrupt_callback, NULL) < 0) {
-		fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
-		exit(1);
-	}
-	AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
-	avstream_video = avformat_new_stream(avctx, codec_video);
-	if (avstream_video == nullptr) {
-		fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
-		exit(1);
-	}
-	avstream_video->time_base = AVRational{1, TIMEBASE};
-	avstream_video->codec->width = width;
-	avstream_video->codec->height = height;
-	avstream_video->codec->time_base = AVRational{1, TIMEBASE};
-	avstream_video->codec->ticks_per_frame = 1;  // or 2?
-
-	AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
-	avstream_audio = avformat_new_stream(avctx, codec_audio);
-	if (avstream_audio == nullptr) {
-		fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
-		exit(1);
-	}
-	avstream_audio->time_base = AVRational{1, TIMEBASE};
-	avstream_audio->codec->bit_rate = 256000;
-	avstream_audio->codec->sample_rate = 48000;
-	avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
-	avstream_audio->codec->channels = 2;
-	avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
-	avstream_audio->codec->time_base = AVRational{1, TIMEBASE};
-
-	/* open it */
-	if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
+	AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
+	context_audio = avcodec_alloc_context3(codec_audio);
+	context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+	context_audio->sample_rate = OUTPUT_FREQUENCY;
+	context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+	context_audio->channels = 2;
+	context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
+	context_audio->time_base = AVRational{1, TIMEBASE};
+	if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
 		fprintf(stderr, "Could not open codec\n");
 		exit(1);
 	}
-	if (avformat_write_header(avctx, NULL) < 0) {
-		fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
-		exit(1);
-	}
-
 	frame_width = width;
 	frame_height = height;
 	frame_width_mbaligned = (frame_width + 15) & (~15);
@@ -1892,7 +1884,7 @@ H264Encoder::H264Encoder(QSurface *surface, int width, int height, const char *o
 
 	copy_thread = std::thread([this]{
 		//SDL_GL_MakeCurrent(window, context);
-		QOpenGLContext *context = create_context();
+		QOpenGLContext *context = create_context(this->surface);
 		eglBindAPI(EGL_OPENGL_API);
 		if (!make_current(context, this->surface)) {
 			printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
@@ -1913,16 +1905,13 @@ H264Encoder::~H264Encoder()
 	{
 		unique_lock<mutex> lock(frame_queue_mutex);
 		copy_thread_should_quit = true;
-		frame_queue_nonempty.notify_one();
+		frame_queue_nonempty.notify_all();
 	}
 	storage_thread.join();
 	copy_thread.join();
 
 	release_encode();
 	deinit_va();
-
-	av_write_trailer(avctx);
-	avformat_free_context(avctx);
 }
 
 bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
@@ -1995,10 +1984,9 @@ void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
 		unique_lock<mutex> lock(frame_queue_mutex);
 		pending_audio_frames[pts] = move(audio);
 	}
-	frame_queue_nonempty.notify_one();
+	frame_queue_nonempty.notify_all();
 }
 
-
 void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
 {
 	{
@@ -2006,7 +1994,7 @@ void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vect
 		pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
 		++current_storage_frame;
 	}
-	frame_queue_nonempty.notify_one();
+	frame_queue_nonempty.notify_all();
 }
 
 void H264Encoder::copy_thread_func()
@@ -2073,7 +2061,7 @@ void H264Encoder::copy_thread_func()
 		int64_t dts;
 		if (pts_lag == -1) {
 			assert(last_dts != -1);
-			dts = last_dts + (TIMEBASE / frame_rate);
+			dts = last_dts + (TIMEBASE / MAX_FPS);
 		} else {
 			dts = pts - pts_lag;
 		}
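A final sanity check on the timestamp arithmetic, again assuming TIMEBASE = 90000 and MAX_FPS = 60: the global_delay applied in save_codeddata() is (ip_period - 1) * (TIMEBASE / MAX_FPS) = 2 * 1500 = 3000 ticks, i.e. two frame periods, or about 33 ms. That matches the largest lag a P-frame can announce in the worked example above, so after the shift no frame's dts starts out negative.

	// Compile-time restatement of the arithmetic above (constants assumed).
	static_assert((3 - 1) * (90000 / 60) == 3000, "global_delay is two frame periods");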