// nageru/video_stream.cpp: output stream of original and interpolated frames.
// The interpolation is done in Y'CbCr instead of RGBA, which saves some
// conversions back and forth.
#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include <inttypes.h>
#include <jpeglib.h>
#include <unistd.h>

#include "context.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"

#include <epoxy/glx.h>

using namespace std;

extern HTTPD *global_httpd;

namespace {

string read_file(const string &filename)
{
        FILE *fp = fopen(filename.c_str(), "rb");
        if (fp == nullptr) {
                perror(filename.c_str());
                return "";
        }

        fseek(fp, 0, SEEK_END);
        long len = ftell(fp);
        rewind(fp);

        string ret;
        ret.resize(len);
        if (len > 0 && fread(&ret[0], len, 1, fp) != 1) {
                perror(filename.c_str());
                fclose(fp);
                return "";
        }
        fclose(fp);
        return ret;
}

}  // namespace

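// A libjpeg destination manager that writes the compressed bitstream into a
// growable std::vector<uint8_t>, so we can encode JPEGs directly to memory.
// libjpeg calls back through the function pointers in the embedded
// jpeg_destination_mgr (which must be the first member; see the
// standard-layout assertion below), and each thunk recovers our object by
// casting the jpeg_destination_mgr pointer back.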
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        std::vector<uint8_t> dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

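// Encode the given interleaved Y'CbCr(A) pixel buffer (Y' in the first byte
// of each pixel, Cb and Cr in the next two, as produced by our conversion
// chain and read back from the GPU) into an in-memory JPEG with 4:2:2
// chroma subsampling.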
vector<uint8_t> encode_jpeg(const uint8_t *pixel_data, unsigned width, unsigned height)
{
        VectorDestinationManager dest;

        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        cinfo.dest = (jpeg_destination_mgr *)&dest;
        cinfo.input_components = 3;
        cinfo.in_color_space = JCS_RGB;
        jpeg_set_defaults(&cinfo);
        constexpr int quality = 90;
        jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.raw_data_in = true;
        jpeg_set_colorspace(&cinfo, JCS_YCbCr);
        cinfo.comp_info[0].h_samp_factor = 2;
        cinfo.comp_info[0].v_samp_factor = 1;
        cinfo.comp_info[1].h_samp_factor = 1;
        cinfo.comp_info[1].v_samp_factor = 1;
        cinfo.comp_info[2].h_samp_factor = 1;
        cinfo.comp_info[2].v_samp_factor = 1;
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);

        // TODO: Subsample and deinterleave on the GPU.

        unique_ptr<uint8_t[]> ydata(new uint8_t[width * 8]);
        unique_ptr<uint8_t[]> cbdata(new uint8_t[(width / 2) * 8]);
        unique_ptr<uint8_t[]> crdata(new uint8_t[(width / 2) * 8]);
        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned yy = 0; yy < 8; ++yy) {
                yptr[yy] = ydata.get() + yy * width;
                cbptr[yy] = cbdata.get() + yy * (width / 2);
                crptr[yy] = crdata.get() + yy * (width / 2);
        }
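        // Feed libjpeg eight rows at a time, converting from interleaved to the
        // planar layout jpeg_write_raw_data() wants. The source is traversed
        // bottom-up (height - y - yy - 1) since the OpenGL readback is vertically
        // flipped, and chroma is averaged over each horizontal pixel pair to get
        // the 4:2:2 subsampling declared above.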
        for (unsigned y = 0; y < height; y += 8) {
                uint8_t *yptr = ydata.get();
                uint8_t *cbptr = cbdata.get();
                uint8_t *crptr = crdata.get();
                for (unsigned yy = 0; yy < 8; ++yy) {
                        const uint8_t *sptr = &pixel_data[(height - y - yy - 1) * width * 4];
                        for (unsigned x = 0; x < width; x += 2) {
                                *yptr++ = sptr[0];
                                *yptr++ = sptr[4];
                                *cbptr++ = (sptr[1] + sptr[5]) / 2;
                                *crptr++ = (sptr[2] + sptr[6]) / 2;
                                sptr += 8;
                        }
                }

                jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
        }

        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);

        return move(dest.dest);
}

VideoStream::VideoStream()
{
        using namespace movit;
        // TODO: deduplicate code against JPEGFrameView?
        ycbcr_convert_chain.reset(new EffectChain(1280, 720));
        ImageFormat image_format;
        image_format.color_space = COLORSPACE_sRGB;
        image_format.gamma_curve = GAMMA_sRGB;
        ycbcr_format.luma_coefficients = YCBCR_REC_709;
        ycbcr_format.full_range = true;  // JPEG.
        ycbcr_format.num_levels = 256;
        ycbcr_format.chroma_subsampling_x = 2;
        ycbcr_format.chroma_subsampling_y = 1;
        ycbcr_format.cb_x_position = 0.0f;  // H.264 -- _not_ JPEG, even though our input is MJPEG-encoded
        ycbcr_format.cb_y_position = 0.5f;  // Irrelevant.
        ycbcr_format.cr_x_position = 0.0f;
        ycbcr_format.cr_y_position = 0.5f;
        ycbcr_input = (movit::YCbCrInput *)ycbcr_convert_chain->add_input(new YCbCrInput(image_format, ycbcr_format, 1280, 720));

        YCbCrFormat ycbcr_output_format = ycbcr_format;
        ycbcr_output_format.chroma_subsampling_x = 1;

        ImageFormat inout_format;
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;

        check_error();

        // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
        // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
        // of getting the gray data into a layered texture.
        ycbcr_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        check_error();
        ycbcr_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        check_error();
        ycbcr_convert_chain->set_dither_bits(8);
        check_error();
        ycbcr_convert_chain->finalize();
        check_error();

        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
        check_error();
        constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
        int levels = find_num_levels(width, height);
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();

                InterpolatedFrameResources resource;
                resource.input_tex = input_tex[i];
                resource.gray_tex = gray_tex[i];
                glCreateFramebuffers(2, resource.input_fbos);
                check_error();

                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();

                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
                check_error();

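                // A persistently mapped PBO lets the encode thread read the
                // finished pixels straight out of pbo_contents (after waiting
                // on the frame's fence) without making any further GL calls.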
                glCreateBuffers(1, &resource.pbo);
                check_error();
                glNamedBufferStorage(resource.pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                check_error();
                resource.pbo_contents = glMapNamedBufferRange(resource.pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                interpolate_resources.push_back(resource);
        }

        check_error();

        compute_flow.reset(new DISComputeFlow(width, height, operating_point3));
        interpolate.reset(new Interpolate(width, height, operating_point3));
        check_error();
}

VideoStream::~VideoStream() {}

void VideoStream::start()
{
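        // Mux the stream as MJPEG video in a NUT container, with a custom AVIO
        // write callback that hands the muxed bytes on to the HTTP server.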
        AVFormatContext *avctx = avformat_alloc_context();
        avctx->oformat = av_guess_format("nut", nullptr, nullptr);

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
        avctx->pb->ignore_boundary_point = 1;

        Mux::Codec video_codec = Mux::CODEC_MJPEG;

        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        string video_extradata;

        constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
        stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
                /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

        encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
        encode_thread.join();
}

void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
{
        fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, input_pts);

        QueuedFrame qf;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.input_first_pts = input_pts;

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha)
{
        fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);

        // Get the temporary OpenGL resources we need for doing the interpolation.
        InterpolatedFrameResources resources;
        {
                unique_lock<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = interpolate_resources.front();
                interpolate_resources.pop_front();
        }

        QueuedFrame qf;
        qf.type = QueuedFrame::INTERPOLATED;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.resources = resources;

        check_error();

        // Convert frame0 and frame1 to OpenGL textures.
        // TODO: Deduplicate against JPEGFrameView::setDecodedFrame?
        for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
                JPEGID jpeg_id;
                jpeg_id.stream_idx = stream_idx;
                jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
                ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
                ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;
                ycbcr_input->change_ycbcr_format(ycbcr_format);
                ycbcr_input->set_width(frame->width);
                ycbcr_input->set_height(frame->height);
                ycbcr_input->set_pixel_data(0, frame->y.get());
                ycbcr_input->set_pixel_data(1, frame->cb.get());
                ycbcr_input->set_pixel_data(2, frame->cr.get());
                ycbcr_input->set_pitch(0, frame->pitch_y);
                ycbcr_input->set_pitch(1, frame->pitch_chroma);
                ycbcr_input->set_pitch(2, frame->pitch_chroma);
                ycbcr_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
        }

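        // Build mipmaps for both textures; the flow computation works
        // coarse-to-fine, so it samples from the reduced-resolution mip levels.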
        glGenerateTextureMipmap(resources.input_tex);
        check_error();
        glGenerateTextureMipmap(resources.gray_tex);
        check_error();

        // Compute the interpolated frame.
        qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
        check_error();
        qf.output_tex = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
        check_error();

        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.

        // Read it down (asynchronously) to the CPU.
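        // Since a buffer is bound to GL_PIXEL_PACK_BUFFER, the final pointer
        // argument to glGetTextureImage() is interpreted as an offset into the
        // PBO (here 0), which makes the transfer asynchronous.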
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
        check_error();
        glGetTextureImage(qf.output_tex, 0, GL_RGBA, GL_UNSIGNED_BYTE, 1280 * 720 * 4, nullptr);
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

void VideoStream::encode_thread_func()
{
        pthread_setname_np(pthread_self(), "VideoStream");
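        // The encoder runs in its own thread and thus needs its own OpenGL
        // context; a context can only be current in one thread at a time.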
        QSurface *surface = create_surface();
        QOpenGLContext *context = create_context(surface);
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
                exit(1);
        }

        for ( ;; ) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);
                        queue_nonempty.wait(lock, [this]{
                                return !frame_queue.empty();
                        });
                        qf = frame_queue.front();
                        frame_queue.pop_front();
                }

                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
                        string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                } else if (qf.type == QueuedFrame::INTERPOLATED) {
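                        // Wait until the GPU is done with both the interpolation and
                        // the readback into the persistently mapped PBO, so that
                        // pbo_contents is guaranteed to hold the finished frame.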
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        vector<uint8_t> jpeg = encode_jpeg((const uint8_t *)qf.resources.pbo_contents, 1280, 720);
                        compute_flow->release_texture(qf.flow_tex);
                        interpolate->release_texture(qf.output_tex);

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

                        // Put the frame resources back.
                        unique_lock<mutex> lock(queue_lock);
                        interpolate_resources.push_back(qf.resources);
                }
        }
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoStream *video_stream = (VideoStream *)opaque;
        return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

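        // The mux header has to be kept around and sent to every HTTP client
        // that connects later, so it goes through set_header() instead of
        // being broadcast as ordinary stream data.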
        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                global_httpd->set_header(stream_mux_header);
        } else {
                global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}