#include "video_encoder.h"

#include <assert.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <string>
#include <thread>

extern "C" {
#include <libavutil/mem.h>
}

#include "audio_encoder.h"
#include "defs.h"
#include "ffmpeg_raii.h"
#include "flags.h"
#include "httpd.h"
#include "mux.h"
#include "quicksync_encoder.h"
#include "timebase.h"
#include "x264_encoder.h"

class RefCountedFrame;

using namespace std;
using namespace movit;

namespace {

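// Builds a filename of the form <LOCAL_DUMP_PREFIX><timestamp>-fNN<LOCAL_DUMP_SUFFIX>,
// e.g. something like "record-2016-04-01-18:30:00+0200-f00.nut". (The example prefix
// and suffix are illustrative only; the actual values come from defs.h.)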
string generate_local_dump_filename(int frame)
{
        time_t now = time(NULL);
        tm now_tm;
        localtime_r(&now, &now_tm);

        char timestamp[256];
        strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);

        // Use the frame number to disambiguate between two cuts starting
        // on the same second.
        char filename[256];
        snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
                LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
        return filename;
}

}  // namespace

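// VideoEncoder owns the whole encoding pipeline: a QuickSyncEncoder that writes
// the local disk recording, an optional X264Encoder (used when x264 output to
// HTTP and/or disk is requested), and the audio encoder plus Mux for the HTTP
// stream served through HTTPD. (High-level summary only; see the individual
// classes for the details.)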
VideoEncoder::VideoEncoder(ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd, DiskSpaceEstimator *disk_space_estimator)
        : resource_pool(resource_pool), surface(surface), va_display(va_display), width(width), height(height), httpd(httpd), disk_space_estimator(disk_space_estimator)
{
        oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
        assert(oformat != nullptr);
        if (global_flags.stream_audio_codec_name.empty()) {
                stream_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, oformat));
        } else {
                stream_audio_encoder.reset(new AudioEncoder(global_flags.stream_audio_codec_name, global_flags.stream_audio_codec_bitrate, oformat));
        }
        if (global_flags.x264_video_to_http || global_flags.x264_video_to_disk) {
                x264_encoder.reset(new X264Encoder(oformat));
        }

        string filename = generate_local_dump_filename(/*frame=*/0);
        quicksync_encoder.reset(new QuickSyncEncoder(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder.get(), disk_space_estimator));

        open_output_stream();
        stream_audio_encoder->add_mux(stream_mux.get());
        quicksync_encoder->set_stream_mux(stream_mux.get());
        if (global_flags.x264_video_to_http) {
                x264_encoder->add_mux(stream_mux.get());
        }
}

VideoEncoder::~VideoEncoder()
{
        quicksync_encoder->shutdown();
        x264_encoder.reset(nullptr);
        quicksync_encoder->close_file();
        quicksync_encoder.reset(nullptr);
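        // Wait until any encoders still being shut down in the background
        // (presumably from earlier cuts; see do_cut()) are done before we tear down.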
        while (quicksync_encoders_in_shutdown.load() > 0) {
                usleep(10000);
        }
}

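// Start a new local dump file (a “cut”) without interrupting the HTTP stream.
// The old QuickSyncEncoder (and the old x264 encoder, if it was doing the disk
// encoding) is shut down asynchronously; see the comments below.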
void VideoEncoder::do_cut(int frame)
{
        string filename = generate_local_dump_filename(frame);
        printf("Starting new recording: %s\n", filename.c_str());

        // Do the shutdown of the old encoder in a separate thread, since it can
        // take some time (it needs to wait for all the frames in the queue to be
        // done encoding, for one) and we are running on the main mixer thread.
        // However, since both encoders could then be sending packets at the same
        // time, pts could come out of order to the stream mux, so we need to plug
        // it until the shutdown is complete.
        stream_mux->plug();
        lock_guard<mutex> lock(qs_mu);
        QuickSyncEncoder *old_encoder = quicksync_encoder.release();  // When we go C++14, we can use move capture instead.
        X264Encoder *old_x264_encoder = nullptr;
        if (global_flags.x264_video_to_disk) {
                old_x264_encoder = x264_encoder.release();
        }
        thread([old_encoder, old_x264_encoder, this]{
                old_encoder->shutdown();
                delete old_x264_encoder;
                old_encoder->close_file();
                stream_mux->unplug();

                // We cannot delete the encoder here, as this thread has no OpenGL context.
                // We'll deal with it in begin_frame().
                lock_guard<mutex> lock(qs_mu);
                qs_needing_cleanup.emplace_back(old_encoder);
        }).detach();

        if (global_flags.x264_video_to_disk) {
                x264_encoder.reset(new X264Encoder(oformat));
                if (global_flags.x264_video_to_http) {
                        x264_encoder->add_mux(stream_mux.get());
                }
                if (overriding_bitrate != 0) {
                        x264_encoder->change_bitrate(overriding_bitrate);
                }
        }

        quicksync_encoder.reset(new QuickSyncEncoder(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder.get(), disk_space_estimator));
        quicksync_encoder->set_stream_mux(stream_mux.get());
}

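// Remember the overridden bitrate, so that any x264 encoder created by a later
// cut (see do_cut()) starts out with the same bitrate.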
void VideoEncoder::change_x264_bitrate(unsigned rate_kbit)
{
        overriding_bitrate = rate_kbit;
        x264_encoder->change_bitrate(rate_kbit);
}

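// Audio goes both to the local recording (through the QuickSyncEncoder) and to
// the HTTP stream's audio encoder; the stream side gets global_delay() added to
// its pts (presumably to match the delay applied to the stream's video).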
void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
{
        lock_guard<mutex> lock(qs_mu);
        quicksync_encoder->add_audio(pts, audio);
        stream_audio_encoder->encode_audio(audio, pts + quicksync_encoder->global_delay());
}

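// begin_frame()/end_frame() are called from a thread that holds an OpenGL context
// (which is why leftover encoders from do_cut() are deleted here, not in the
// shutdown thread). begin_frame() hands back, through y_tex/cbcr_tex, the textures
// the caller is expected to render the frame into.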
bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
        lock_guard<mutex> lock(qs_mu);
        qs_needing_cleanup.clear();  // Since we have an OpenGL context here, and are called regularly.
        return quicksync_encoder->begin_frame(pts, duration, ycbcr_coefficients, input_frames, y_tex, cbcr_tex);
}

RefCountedGLsync VideoEncoder::end_frame()
{
        lock_guard<mutex> lock(qs_mu);
        return quicksync_encoder->end_frame();
}

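// Set up the Mux for the HTTP stream. We use a custom AVIO context so that the
// muxed output ends up in write_packet2() (and from there in the HTTPD) instead
// of in a file.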
void VideoEncoder::open_output_stream()
{
        AVFormatContext *avctx = avformat_alloc_context();
        avctx->oformat = oformat;

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &VideoEncoder::write_packet2_thunk;
        avctx->pb->ignore_boundary_point = 1;

        Mux::Codec video_codec;
        if (global_flags.uncompressed_video_to_http) {
                video_codec = Mux::CODEC_NV12;
        } else {
                video_codec = Mux::CODEC_H264;
        }

        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        string video_extradata;
        if (global_flags.x264_video_to_http || global_flags.x264_video_to_disk) {
                video_extradata = x264_encoder->get_global_headers();
        }

        int time_base = global_flags.stream_coarse_timebase ? COARSE_TIMEBASE : TIMEBASE;
        stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, stream_audio_encoder->get_codec_parameters().get(), time_base,
                /*write_callback=*/nullptr));
}

int VideoEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoEncoder *video_encoder = (VideoEncoder *)opaque;
        return video_encoder->write_packet2(buf, buf_size, type, time);
}

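// Receives the muxed stream data. Header data is accumulated and handed to HTTPD
// as the stream header for new clients; everything else is broadcast to connected
// clients, with sync points flagged as keyframes (so that HTTPD can, e.g., start
// new clients on them).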
int VideoEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                httpd->set_header(stream_mux_header);
        } else {
                httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT);
        }
        return buf_size;
}