#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include <jpeglib.h>
#include <unistd.h>

#include "context.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"

#include <epoxy/glx.h>

using namespace std;

extern HTTPD *global_httpd;

namespace {

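// Reads an entire file into memory. Returns an empty string if the file
// could not be opened or read.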
string read_file(const string &filename)
{
        FILE *fp = fopen(filename.c_str(), "rb");
        if (fp == nullptr) {
                perror(filename.c_str());
                return "";
        }

        fseek(fp, 0, SEEK_END);
        long len = ftell(fp);
        rewind(fp);

        string ret;
        ret.resize(len);
        if (len > 0 && fread(&ret[0], len, 1, fp) != 1) {
                perror(filename.c_str());
                fclose(fp);
                return "";
        }
        fclose(fp);
        return ret;
}

}  // namespace

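// A libjpeg destination manager that writes the compressed bitstream into a
// growing std::vector<uint8_t>. The jpeg_destination_mgr must be the first
// member so that the pointer casts in the thunks below are valid; the
// static_assert after the struct guards this (standard layout guarantees the
// first member sits at offset zero).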
struct VectorDestinationManager {
        jpeg_destination_mgr pub;  // Must be the first member!
        std::vector<uint8_t> dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

        // Grow the vector so that there are at least 4 kB of free space after
        // <bytes_used>, then hand all of the actually allocated capacity to
        // libjpeg as output buffer.
        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                // Shrink the vector down to the bytes actually written.
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

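// Encodes a 4:2:2 JPEG from a planar luma buffer plus interleaved CbCr (both
// stored bottom-up, as read back from OpenGL; the rows are flipped during
// encoding). The chroma arrives at full horizontal resolution and is averaged
// down to half width on the CPU. Note that the loop below assumes <height>
// is a multiple of 8, since jpeg_write_raw_data() wants whole MCU rows.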
vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cbcr_data, unsigned width, unsigned height)
{
        VectorDestinationManager dest;

        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        cinfo.dest = (jpeg_destination_mgr *)&dest;
        cinfo.input_components = 3;
        cinfo.in_color_space = JCS_RGB;
        jpeg_set_defaults(&cinfo);
        constexpr int quality = 90;
        jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.raw_data_in = true;
        jpeg_set_colorspace(&cinfo, JCS_YCbCr);
        cinfo.comp_info[0].h_samp_factor = 2;
        cinfo.comp_info[0].v_samp_factor = 1;
        cinfo.comp_info[1].h_samp_factor = 1;
        cinfo.comp_info[1].v_samp_factor = 1;
        cinfo.comp_info[2].h_samp_factor = 1;
        cinfo.comp_info[2].v_samp_factor = 1;
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);

        // TODO: Subsample on the GPU.
        unique_ptr<uint8_t[]> cbdata(new uint8_t[(width / 2) * 8]);
        unique_ptr<uint8_t[]> crdata(new uint8_t[(width / 2) * 8]);
        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned yy = 0; yy < 8; ++yy) {
                cbptr[yy] = cbdata.get() + yy * (width / 2);
                crptr[yy] = crdata.get() + yy * (width / 2);
        }
        for (unsigned y = 0; y < height; y += 8) {
                uint8_t *cb_out = cbdata.get();
                uint8_t *cr_out = crdata.get();
                for (unsigned yy = 0; yy < 8; ++yy) {
                        // The source is bottom-up, so flip it vertically as we go.
                        yptr[yy] = const_cast<JSAMPROW>(&y_data[(height - y - yy - 1) * width]);
                        const uint8_t *sptr = &cbcr_data[(height - y - yy - 1) * width * 2];

                        // Deinterleave CbCr, averaging two and two pixels
                        // horizontally to get 4:2:2 chroma.
                        for (unsigned x = 0; x < width; x += 2) {
                                *cb_out++ = (sptr[0] + sptr[2]) / 2;
                                *cr_out++ = (sptr[1] + sptr[3]) / 2;
                                sptr += 4;
                        }
                }

                jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
        }

        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);

        return move(dest.dest);
}
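
// A minimal usage sketch (with hypothetical, freshly allocated buffers; the
// real callers below pass pointers into the persistently mapped readback PBO):
//
//   vector<uint8_t> y(1280 * 720), cbcr(1280 * 720 * 2);
//   vector<uint8_t> jpeg = encode_jpeg(y.data(), cbcr.data(), 1280, 720);
//   // "jpeg" now holds a complete JPEG bitstream, ready to be muxed.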

VideoStream::VideoStream()
{
        using namespace movit;
        // TODO: deduplicate code against JPEGFrameView?
        ycbcr_convert_chain.reset(new EffectChain(1280, 720));
        ImageFormat image_format;
        image_format.color_space = COLORSPACE_sRGB;
        image_format.gamma_curve = GAMMA_sRGB;
        ycbcr_format.luma_coefficients = YCBCR_REC_709;
        ycbcr_format.full_range = true;  // JPEG.
        ycbcr_format.num_levels = 256;
        ycbcr_format.chroma_subsampling_x = 2;
        ycbcr_format.chroma_subsampling_y = 1;
        ycbcr_format.cb_x_position = 0.0f;  // H.264 -- _not_ JPEG, even though our input is MJPEG-encoded
        ycbcr_format.cb_y_position = 0.5f;  // Irrelevant.
        ycbcr_format.cr_x_position = 0.0f;
        ycbcr_format.cr_y_position = 0.5f;
        ycbcr_input = (movit::YCbCrInput *)ycbcr_convert_chain->add_input(new YCbCrInput(image_format, ycbcr_format, 1280, 720));

        YCbCrFormat ycbcr_output_format = ycbcr_format;
        ycbcr_output_format.chroma_subsampling_x = 1;

        ImageFormat inout_format;
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;

        check_error();

        // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
        // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
        // of getting the gray data into a layered texture.
        ycbcr_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        check_error();
        ycbcr_convert_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
        check_error();
        ycbcr_convert_chain->set_dither_bits(8);
        check_error();
        ycbcr_convert_chain->finalize();
        check_error();

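        // Set up the pool of resources for interpolated frames: for each slot,
        // a two-layer texture holding both input frames (plus a single-channel
        // copy of them for the flow computation), one FBO per layer to render
        // into, and a persistently mapped PBO for asynchronously reading the
        // interpolated result back to the CPU.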
        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
        check_error();
        constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
        int levels = find_num_levels(width, height);
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();

                InterpolatedFrameResources resource;
                resource.input_tex = input_tex[i];
                resource.gray_tex = gray_tex[i];
                glCreateFramebuffers(2, resource.input_fbos);
                check_error();

                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();

                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
                check_error();

                glCreateBuffers(1, &resource.pbo);
                check_error();
                glNamedBufferStorage(resource.pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                check_error();
                // Map the PBO persistently, so the encode thread can read the
                // contents without having to map and unmap for every frame.
                resource.pbo_contents = glMapNamedBufferRange(resource.pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                interpolate_resources.push_back(resource);
        }

        check_error();

        compute_flow.reset(new DISComputeFlow(width, height, operating_point2));
        interpolate.reset(new Interpolate(width, height, operating_point2, /*split_ycbcr_output=*/true));
        check_error();
}

VideoStream::~VideoStream() {}

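// Sets up a NUT mux over a custom AVIO context, so that everything the muxer
// writes is handed to write_packet2() (and from there to the HTTP server),
// then starts the encode thread.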
void VideoStream::start()
{
        AVFormatContext *avctx = avformat_alloc_context();
        avctx->oformat = av_guess_format("nut", nullptr, nullptr);

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
        avctx->pb->ignore_boundary_point = 1;

        Mux::Codec video_codec = Mux::CODEC_MJPEG;

        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        string video_extradata;

        constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
        stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
                /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

        encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
        encode_thread.join();
}

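// Schedules a frame that exists verbatim on disk; the encode thread will read
// the JPEG back and pass it through to the mux unchanged.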
void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
{
        fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, input_pts);

        QueuedFrame qf;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.input_first_pts = input_pts;

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

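// Schedules a frame interpolated at the given alpha between two input frames.
// The GPU work (flow computation, interpolation, readback) is kicked off here;
// the encode thread waits on the fence, JPEG-encodes the result and muxes it.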
void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha)
{
        fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);

        // Get the temporary OpenGL resources we need for doing the interpolation.
        InterpolatedFrameResources resources;
        {
                unique_lock<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = interpolate_resources.front();
                interpolate_resources.pop_front();
        }

        QueuedFrame qf;
        qf.type = QueuedFrame::INTERPOLATED;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.resources = resources;

        check_error();

        // Convert frame0 and frame1 to OpenGL textures.
        // TODO: Deduplicate against JPEGFrameView::setDecodedFrame?
        for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
                JPEGID jpeg_id;
                jpeg_id.stream_idx = stream_idx;
                jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
                ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
                ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;
                ycbcr_input->change_ycbcr_format(ycbcr_format);
                ycbcr_input->set_width(frame->width);
                ycbcr_input->set_height(frame->height);
                ycbcr_input->set_pixel_data(0, frame->y.get());
                ycbcr_input->set_pixel_data(1, frame->cb.get());
                ycbcr_input->set_pixel_data(2, frame->cr.get());
                ycbcr_input->set_pitch(0, frame->pitch_y);
                ycbcr_input->set_pitch(1, frame->pitch_chroma);
                ycbcr_input->set_pitch(2, frame->pitch_chroma);
                ycbcr_convert_chain->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
        }

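        // Generate mipmaps, so that the flow computation (which works
        // coarse-to-fine over an image pyramid) has all the levels it needs.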
        glGenerateTextureMipmap(resources.input_tex);
        check_error();
        glGenerateTextureMipmap(resources.gray_tex);
        check_error();

        // Compute the interpolated frame.
        qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
        check_error();
        tie(qf.output_tex, qf.output2_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
        check_error();

        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.

        // Read it down (asynchronously) to the CPU; the luma plane goes at the
        // start of the PBO, the interleaved CbCr plane right after it.
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
        check_error();
        glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
        check_error();
        glGetTextureImage(qf.output2_tex, 0, GL_RG, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();

        unique_lock<mutex> lock(queue_lock);
        frame_queue.push_back(qf);
        queue_nonempty.notify_all();
}

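// The encode thread runs with its own OpenGL context. It pops frames off the
// queue in order; original frames are passed through from disk unchanged,
// while interpolated frames are read back from the GPU (after waiting on the
// fence), JPEG-encoded on the CPU and then muxed.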
void VideoStream::encode_thread_func()
{
        pthread_setname_np(pthread_self(), "VideoStream");
        QSurface *surface = create_surface();
        QOpenGLContext *context = create_context(surface);
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
                exit(1);
        }

        for ( ;; ) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);
                        queue_nonempty.wait(lock, [this]{
                                return !frame_queue.empty();
                        });
                        qf = frame_queue.front();
                        frame_queue.pop_front();
                }

                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
                        string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                } else if (qf.type == QueuedFrame::INTERPOLATED) {
                        // Wait for the readback to finish, so that the PBO
                        // contents are visible to the CPU.
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        vector<uint8_t> jpeg = encode_jpeg(
                                (const uint8_t *)qf.resources.pbo_contents,
                                (const uint8_t *)qf.resources.pbo_contents + 1280 * 720,
                                1280, 720);
                        compute_flow->release_texture(qf.flow_tex);
                        interpolate->release_texture(qf.output_tex);
                        interpolate->release_texture(qf.output2_tex);

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

                        // Put the frame resources back.
                        unique_lock<mutex> lock(queue_lock);
                        interpolate_resources.push_back(qf.resources);
                }
        }
}

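// Trampoline from the C-style AVIO write callback into the member function below.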
int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoStream *video_stream = (VideoStream *)opaque;
        return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                global_httpd->set_header(stream_mux_header);
        } else {
                global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}