#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include <inttypes.h>
#include <jpeglib.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <string>
#include <type_traits>
#include <vector>

#include "chroma_subsampler.h"
#include "context.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"

#include <epoxy/glx.h>

using namespace std;

extern HTTPD *global_httpd;

namespace {

string read_file(const string &filename)
{
        FILE *fp = fopen(filename.c_str(), "rb");
        if (fp == nullptr) {
                perror(filename.c_str());
                return "";
        }

        fseek(fp, 0, SEEK_END);
        long len = ftell(fp);
        rewind(fp);

        string ret;
        ret.resize(len);
        if (fread(&ret[0], len, 1, fp) != 1) {
                perror(filename.c_str());
                fclose(fp);
                return "";
        }
        fclose(fp);
        return ret;
}

}  // namespace
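// A libjpeg destination manager that writes the compressed JPEG into a
// growable std::vector<uint8_t> instead of a stdio FILE. libjpeg only sees
// the embedded jpeg_destination_mgr, so the struct needs to be standard
// layout (verified below) for the cast from cinfo.dest back to this type
// to be valid.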
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        std::vector<uint8_t> dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

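// Encode a planar 4:2:2 Y'CbCr image as a baseline JPEG. The rows are fed
// to libjpeg bottom-to-top, since the pixel data comes from an OpenGL
// readback and is therefore stored upside down.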
vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
        VectorDestinationManager dest;

        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        cinfo.dest = (jpeg_destination_mgr *)&dest;
        cinfo.input_components = 3;
        cinfo.in_color_space = JCS_RGB;
        jpeg_set_defaults(&cinfo);
        constexpr int quality = 90;
        jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.raw_data_in = true;
        jpeg_set_colorspace(&cinfo, JCS_YCbCr);
        cinfo.comp_info[0].h_samp_factor = 2;
        cinfo.comp_info[0].v_samp_factor = 1;
        cinfo.comp_info[1].h_samp_factor = 1;
        cinfo.comp_info[1].v_samp_factor = 1;
        cinfo.comp_info[2].h_samp_factor = 1;
        cinfo.comp_info[2].v_samp_factor = 1;
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);

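        // jpeg_write_raw_data() wants a full MCU row per call; with
        // v_samp_factor == 1 for all components, that is eight scanlines.
        // Note that this assumes the height is divisible by 8.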
        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned y = 0; y < height; y += 8) {
                for (unsigned yy = 0; yy < 8; ++yy) {
                        yptr[yy] = const_cast<JSAMPROW>(&y_data[(height - y - yy - 1) * width]);
                        cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(height - y - yy - 1) * width / 2]);
                        crptr[yy] = const_cast<JSAMPROW>(&cr_data[(height - y - yy - 1) * width / 2]);
                }

                jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
        }

        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);

        return move(dest.dest);
}

VideoStream::VideoStream()
{
        using namespace movit;

        ImageFormat inout_format;
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;

        ycbcr_format.luma_coefficients = YCBCR_REC_709;
        ycbcr_format.full_range = true;  // JPEG.
        ycbcr_format.num_levels = 256;
        ycbcr_format.chroma_subsampling_x = 2;
        ycbcr_format.chroma_subsampling_y = 1;
        ycbcr_format.cb_x_position = 0.0f;  // H.264 -- _not_ JPEG, even though our input is MJPEG-encoded
        ycbcr_format.cb_y_position = 0.5f;  // Irrelevant.
        ycbcr_format.cr_x_position = 0.0f;
        ycbcr_format.cr_y_position = 0.5f;

        YCbCrFormat ycbcr_output_format = ycbcr_format;
        ycbcr_output_format.chroma_subsampling_x = 1;

        // TODO: deduplicate code against JPEGFrameView?
        ycbcr_planar_convert_chain.reset(new EffectChain(1280, 720));
        ycbcr_planar_input = (movit::YCbCrInput *)ycbcr_planar_convert_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_PLANAR));

        // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
        // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
        // of getting the gray data into a layered texture.
        ycbcr_planar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        ycbcr_planar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        ycbcr_planar_convert_chain->set_dither_bits(8);
        ycbcr_planar_convert_chain->finalize();

        // Same, for semiplanar inputs.
        ycbcr_semiplanar_convert_chain.reset(new EffectChain(1280, 720));
        ycbcr_semiplanar_input = (movit::YCbCrInput *)ycbcr_semiplanar_convert_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_SPLIT_Y_AND_CBCR));

        // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
        // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
        // of getting the gray data into a layered texture.
        ycbcr_semiplanar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        ycbcr_semiplanar_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        ycbcr_semiplanar_convert_chain->set_dither_bits(8);
        ycbcr_semiplanar_convert_chain->finalize();

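        // Preallocate the per-slot GPU resources used for interpolation:
        // input_tex holds the two source frames as layers of a 2D array texture,
        // gray_tex the luma-only copies used for optical flow, and cb_tex/cr_tex
        // the subsampled chroma planes produced for JPEG encoding.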
        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots], cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
        check_error();
        constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
        int levels = find_num_levels(width, height);
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();
                glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
                check_error();
                glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
                check_error();

                InterpolatedFrameResources resource;
                resource.input_tex = input_tex[i];
                resource.gray_tex = gray_tex[i];
                resource.cb_tex = cb_tex[i];
                resource.cr_tex = cr_tex[i];
                glCreateFramebuffers(2, resource.input_fbos);
                check_error();

                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();

                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
                check_error();

                glCreateBuffers(1, &resource.pbo);
                check_error();
                glNamedBufferStorage(resource.pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                check_error();
                resource.pbo_contents = glMapNamedBufferRange(resource.pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                interpolate_resources.push_back(resource);
        }

        check_error();

        compute_flow.reset(new DISComputeFlow(width, height, operating_point2));
        interpolate.reset(new Interpolate(operating_point2, /*split_ycbcr_output=*/true));
        chroma_subsampler.reset(new ChromaSubsampler);
        check_error();
}

VideoStream::~VideoStream() {}

void VideoStream::start()
{
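        // Mux into a NUT stream over a custom AVIO context, so that everything
        // the muxer writes ends up in write_packet2() (and from there, in the
        // HTTP server) instead of in a file.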
        AVFormatContext *avctx = avformat_alloc_context();
        avctx->oformat = av_guess_format("nut", nullptr, nullptr);

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
        avctx->pb->ignore_boundary_point = 1;

        Mux::Codec video_codec = Mux::CODEC_MJPEG;

        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        string video_extradata;

        constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
        stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
                /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

        encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
        encode_thread.join();
}

void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
{
        fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, input_pts);

        QueuedFrame qf;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.input_first_pts = input_pts;

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha)
{
        fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);

        // Get the temporary OpenGL resources we need for doing the interpolation.
        InterpolatedFrameResources resources;
        {
                unique_lock<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        JPEGFrameView::insert_interpolated_frame(stream_idx, output_pts, nullptr);
                        return;
                }
                resources = interpolate_resources.front();
                interpolate_resources.pop_front();
        }

        QueuedFrame qf;
        qf.type = QueuedFrame::INTERPOLATED;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.resources = resources;

        check_error();

        // Convert frame0 and frame1 to OpenGL textures.
        // TODO: Deduplicate against JPEGFrameView::setDecodedFrame?
        for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
                JPEGID jpeg_id;
                jpeg_id.stream_idx = stream_idx;
                jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
                jpeg_id.interpolated = false;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
                ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
                ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;

                if (frame->is_semiplanar) {
                        ycbcr_semiplanar_input->change_ycbcr_format(ycbcr_format);
                        ycbcr_semiplanar_input->set_width(frame->width);
                        ycbcr_semiplanar_input->set_height(frame->height);
                        ycbcr_semiplanar_input->set_pixel_data(0, frame->y.get());
                        ycbcr_semiplanar_input->set_pixel_data(1, frame->cbcr.get());
                        ycbcr_semiplanar_input->set_pitch(0, frame->pitch_y);
                        ycbcr_semiplanar_input->set_pitch(1, frame->pitch_chroma);
                        ycbcr_semiplanar_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
                } else {
                        ycbcr_planar_input->change_ycbcr_format(ycbcr_format);
                        ycbcr_planar_input->set_width(frame->width);
                        ycbcr_planar_input->set_height(frame->height);
                        ycbcr_planar_input->set_pixel_data(0, frame->y.get());
                        ycbcr_planar_input->set_pixel_data(1, frame->cb.get());
                        ycbcr_planar_input->set_pixel_data(2, frame->cr.get());
                        ycbcr_planar_input->set_pitch(0, frame->pitch_y);
                        ycbcr_planar_input->set_pitch(1, frame->pitch_chroma);
                        ycbcr_planar_input->set_pitch(2, frame->pitch_chroma);
                        ycbcr_planar_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
                }
        }

        glGenerateTextureMipmap(resources.input_tex);
        check_error();
        glGenerateTextureMipmap(resources.gray_tex);
        check_error();

        // Compute the interpolated frame.
        qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
        check_error();
        tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
        check_error();

        // Subsample and split Cb/Cr.
        chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);

        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.

        // Read it down (asynchronously) to the CPU.
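        // The PBO is laid out as Y (1280*720 bytes), then Cb (640*720 bytes),
        // then Cr (640*720 bytes). The size arguments below are the number of
        // bytes remaining in the buffer after each offset, not the plane sizes.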
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
        check_error();
        glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
        check_error();
        glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
        check_error();
        glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

void VideoStream::encode_thread_func()
{
        pthread_setname_np(pthread_self(), "VideoStream");
        QSurface *surface = create_surface();
        QOpenGLContext *context = create_context(surface);
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
                exit(1);
        }

        for ( ;; ) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);
                        queue_nonempty.wait(lock, [this]{
                                return !frame_queue.empty();
                        });
                        qf = frame_queue.front();
                        frame_queue.pop_front();
                }

                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
                        string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                } else if (qf.type == QueuedFrame::INTERPOLATED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

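                        // The fence has signaled, so the readback has landed in the
                        // persistently mapped PBO; pick out the three planes, using
                        // the same layout as was set up in schedule_interpolated_frame().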
                        const uint8_t *y = (const uint8_t *)qf.resources.pbo_contents;
                        const uint8_t *cb = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720;
                        const uint8_t *cr = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720 + 640 * 720;

                        // Send a copy of the frame on to display.
                        shared_ptr<Frame> frame(new Frame);
                        frame->y.reset(new uint8_t[1280 * 720]);
                        frame->cb.reset(new uint8_t[640 * 720]);
                        frame->cr.reset(new uint8_t[640 * 720]);
                        for (unsigned yy = 0; yy < 720; ++yy) {
                                memcpy(frame->y.get() + 1280 * yy, y + 1280 * (719 - yy), 1280);
                                memcpy(frame->cb.get() + 640 * yy, cb + 640 * (719 - yy), 640);
                                memcpy(frame->cr.get() + 640 * yy, cr + 640 * (719 - yy), 640);
                        }
                        frame->is_semiplanar = false;
                        frame->width = 1280;
                        frame->height = 720;
                        frame->chroma_subsampling_x = 2;
                        frame->chroma_subsampling_y = 1;
                        frame->pitch_y = 1280;
                        frame->pitch_chroma = 640;
                        JPEGFrameView::insert_interpolated_frame(qf.stream_idx, qf.output_pts, std::move(frame));

                        // Now JPEG encode it, and send it on to the stream.
                        vector<uint8_t> jpeg = encode_jpeg(y, cb, cr, 1280, 720);
                        compute_flow->release_texture(qf.flow_tex);
                        interpolate->release_texture(qf.output_tex);
                        interpolate->release_texture(qf.cbcr_tex);

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

                        // Put the frame resources back.
                        unique_lock<mutex> lock(queue_lock);
                        interpolate_resources.push_back(qf.resources);
                }
        }
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoStream *video_stream = (VideoStream *)opaque;
        return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

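        // The stream header needs to be resent to every new HTTP client,
        // so it is kept separately (via set_header()) instead of being
        // broadcast like ordinary stream data.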
        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                global_httpd->set_header(stream_mux_header);
        } else {
                global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}