Make the cache pruning work on bytes instead of number of images.
[nageru] / jpeg_frame_view.cpp
#include "jpeg_frame_view.h"

#include "defs.h"
#include "jpeg_destroyer.h"
#include "post_to_main_thread.h"
#include "video_stream.h"
#include "ycbcr_converter.h"

#include <QMouseEvent>
#include <QScreen>
#include <algorithm>
#include <assert.h>
#include <atomic>
#include <condition_variable>
#include <deque>
#include <jpeglib.h>
#include <map>
#include <movit/init.h>
#include <movit/resource_pool.h>
#include <movit/util.h>
#include <mutex>
#include <stdint.h>
#include <thread>
#include <unistd.h>
#include <utility>
#include <vector>

// Must come after the Qt stuff.
#include "vaapi_jpeg_decoder.h"

using namespace movit;
using namespace std;

namespace {

// Just an arbitrary order for std::map.
struct JPEGIDLexicalOrder
{
        bool operator() (const JPEGID &a, const JPEGID &b) const
        {
                if (a.stream_idx != b.stream_idx)
                        return a.stream_idx < b.stream_idx;
                if (a.pts != b.pts)
                        return a.pts < b.pts;
                return a.interpolated < b.interpolated;
        }
};

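// Approximate size in bytes of a decoded frame: the full-resolution luma plane
// plus two chroma planes, each reduced by the frame's chroma subsampling factors.
// Used for the byte-based cache accounting below.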
inline size_t frame_size(const Frame &frame)
{
        size_t y_size = frame.width * frame.height;
        size_t cbcr_size = y_size / frame.chroma_subsampling_x / frame.chroma_subsampling_y;
        return y_size + cbcr_size * 2;
}

struct LRUFrame {
        shared_ptr<Frame> frame;
        size_t last_used;
};

struct PendingDecode {
        JPEGID primary, secondary;
        float fade_alpha;  // Irrelevant if secondary.stream_idx == -1.
        JPEGFrameView *destination;
};

}  // namespace

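// Decoded-frame cache and decode queue, shared between the UI thread and the
// decoder thread. Frames are keyed by JPEGID and evicted in LRU order, with
// event_counter acting as the logical clock for "last used".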
thread JPEGFrameView::jpeg_decoder_thread;
mutex cache_mu;
map<JPEGID, LRUFrame, JPEGIDLexicalOrder> cache;  // Under cache_mu.
size_t cache_bytes_used = 0;  // Under cache_mu.
condition_variable any_pending_decodes;
deque<PendingDecode> pending_decodes;  // Under cache_mu.
atomic<size_t> event_counter{0};
extern QGLWidget *global_share_widget;
extern atomic<bool> should_quit;

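// Decode a JPEG from disk into a planar Y'CbCr Frame. VA-API hardware decoding
// is tried first if available; on failure, we fall back to libjpeg in raw
// (non-RGB-converted) mode so that the planes can be uploaded to the GPU as-is.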
shared_ptr<Frame> decode_jpeg(const string &filename)
{
        shared_ptr<Frame> frame;
        if (vaapi_jpeg_decoding_usable) {
                frame = decode_jpeg_vaapi(filename);
                if (frame != nullptr) {
                        return frame;
                }
                fprintf(stderr, "VA-API hardware decoding failed; falling back to software.\n");
        }

        frame.reset(new Frame);

        jpeg_decompress_struct dinfo;
        jpeg_error_mgr jerr;
        dinfo.err = jpeg_std_error(&jerr);
        jpeg_create_decompress(&dinfo);
        JPEGDestroyer destroy_dinfo(&dinfo);

        FILE *fp = fopen(filename.c_str(), "rb");
        if (fp == nullptr) {
                perror(filename.c_str());
                exit(1);
        }
        jpeg_stdio_src(&dinfo, fp);

        jpeg_read_header(&dinfo, true);

        if (dinfo.num_components != 3) {
                fprintf(stderr, "Not a color JPEG. (%d components, Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
                        dinfo.num_components,
                        dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
                        dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
                        dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
                exit(1);
        }
        if (dinfo.comp_info[0].h_samp_factor != dinfo.max_h_samp_factor ||
            dinfo.comp_info[0].v_samp_factor != dinfo.max_v_samp_factor ||  // Y' must not be subsampled.
            dinfo.comp_info[1].h_samp_factor != dinfo.comp_info[2].h_samp_factor ||
            dinfo.comp_info[1].v_samp_factor != dinfo.comp_info[2].v_samp_factor ||  // Cb and Cr must be identically subsampled.
            (dinfo.max_h_samp_factor % dinfo.comp_info[1].h_samp_factor) != 0 ||
            (dinfo.max_v_samp_factor % dinfo.comp_info[1].v_samp_factor) != 0) {  // No 2:3 subsampling or other weirdness.
                fprintf(stderr, "Unsupported subsampling scheme. (Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
                        dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
                        dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
                        dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
                exit(1);
        }
        dinfo.raw_data_out = true;

        jpeg_start_decompress(&dinfo);

        frame->width = dinfo.output_width;
        frame->height = dinfo.output_height;
        frame->chroma_subsampling_x = dinfo.max_h_samp_factor / dinfo.comp_info[1].h_samp_factor;
        frame->chroma_subsampling_y = dinfo.max_v_samp_factor / dinfo.comp_info[1].v_samp_factor;

        unsigned h_mcu_size = DCTSIZE * dinfo.max_h_samp_factor;
        unsigned v_mcu_size = DCTSIZE * dinfo.max_v_samp_factor;
        unsigned mcu_width_blocks = (dinfo.output_width + h_mcu_size - 1) / h_mcu_size;
        unsigned mcu_height_blocks = (dinfo.output_height + v_mcu_size - 1) / v_mcu_size;

        unsigned luma_width_blocks = mcu_width_blocks * dinfo.comp_info[0].h_samp_factor;
        unsigned chroma_width_blocks = mcu_width_blocks * dinfo.comp_info[1].h_samp_factor;
        unsigned luma_height_blocks = mcu_height_blocks * dinfo.comp_info[0].v_samp_factor;
        unsigned chroma_height_blocks = mcu_height_blocks * dinfo.comp_info[1].v_samp_factor;

        // TODO: Decode into a PBO.
        frame->y.reset(new uint8_t[luma_width_blocks * luma_height_blocks * DCTSIZE2]);
        frame->cb.reset(new uint8_t[chroma_width_blocks * chroma_height_blocks * DCTSIZE2]);
        frame->cr.reset(new uint8_t[chroma_width_blocks * chroma_height_blocks * DCTSIZE2]);
        frame->pitch_y = luma_width_blocks * DCTSIZE;
        frame->pitch_chroma = chroma_width_blocks * DCTSIZE;

        JSAMPROW yptr[v_mcu_size], cbptr[v_mcu_size], crptr[v_mcu_size];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned y = 0; y < mcu_height_blocks; ++y) {
                // NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
                for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
                        yptr[yy] = frame->y.get() + (y * DCTSIZE * dinfo.max_v_samp_factor + yy) * frame->pitch_y;
                        cbptr[yy] = frame->cb.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
                        crptr[yy] = frame->cr.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
                }

                jpeg_read_raw_data(&dinfo, data, v_mcu_size);
        }

        (void)jpeg_finish_decompress(&dinfo);
        fclose(fp);

        return frame;
}

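// Evict least-recently-used frames until the cache is back below 90% of its
// byte budget (CACHE_SIZE_MB). As an illustration only: with a hypothetical
// CACHE_SIZE_MB of 2000, pruning would aim to bring cache_bytes_used down to
// roughly 1800 MB.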
void prune_cache()
{
        // Assumes cache_mu is held.
        int64_t bytes_still_to_remove = cache_bytes_used - (size_t(CACHE_SIZE_MB) * 1024 * 1024) * 9 / 10;
        if (bytes_still_to_remove <= 0) return;

        vector<pair<size_t, size_t>> lru_timestamps_and_size;
        for (const auto &key_and_value : cache) {
                lru_timestamps_and_size.emplace_back(
                        key_and_value.second.last_used,
                        frame_size(*key_and_value.second.frame));
        }
        sort(lru_timestamps_and_size.begin(), lru_timestamps_and_size.end());

        // Remove the oldest ones until we are below 90% of the cache used.
        size_t lru_cutoff_point = 0;
        for (const pair<size_t, size_t> &it : lru_timestamps_and_size) {
                lru_cutoff_point = it.first;
                bytes_still_to_remove -= it.second;
                if (bytes_still_to_remove <= 0) break;
        }

        for (auto it = cache.begin(); it != cache.end(); ) {
                if (it->second.last_used <= lru_cutoff_point) {
                        cache_bytes_used -= frame_size(*it->second.frame);
                        it = cache.erase(it);
                } else {
                        ++it;
                }
        }
}

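// Look up the frame in the cache, decoding it on a miss if the caller asked for
// that (DECODE_IF_NOT_IN_CACHE); otherwise a miss returns nullptr. *did_decode
// is set to true only when an actual JPEG decode was performed. A successful
// decode is inserted into the cache and may trigger pruning.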
shared_ptr<Frame> decode_jpeg_with_cache(JPEGID id, CacheMissBehavior cache_miss_behavior, bool *did_decode)
{
        *did_decode = false;
        {
                unique_lock<mutex> lock(cache_mu);
                auto it = cache.find(id);
                if (it != cache.end()) {
                        it->second.last_used = event_counter++;
                        return it->second.frame;
                }
        }

        if (cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE) {
                return nullptr;
        }

        assert(!id.interpolated);
        *did_decode = true;
        shared_ptr<Frame> frame = decode_jpeg(filename_for_frame(id.stream_idx, id.pts));

        unique_lock<mutex> lock(cache_mu);
        cache_bytes_used += frame_size(*frame);
        cache[id] = LRUFrame{ frame, event_counter++ };

        if (cache_bytes_used > size_t(CACHE_SIZE_MB) * 1024 * 1024) {
                prune_cache();
        }
        return frame;
}

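// Worker loop for the JPEG decoder thread. Requests are taken off
// pending_decodes; if a destination view already has more than three further
// requests queued up, we switch to cache-only lookups so that stale frames get
// dropped instead of the queue growing without bound.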
void jpeg_decoder_thread_func()
{
        size_t num_decoded = 0, num_dropped = 0;

        pthread_setname_np(pthread_self(), "JPEGDecoder");
        while (!should_quit.load()) {
                PendingDecode decode;
                CacheMissBehavior cache_miss_behavior = DECODE_IF_NOT_IN_CACHE;
                {
                        unique_lock<mutex> lock(cache_mu);  // TODO: Perhaps under another lock?
                        any_pending_decodes.wait(lock, [] {
                                return !pending_decodes.empty() || should_quit.load();
                        });
                        if (should_quit.load())
                                break;
                        decode = pending_decodes.front();
                        pending_decodes.pop_front();

                        size_t num_pending = 0;
                        for (const PendingDecode &other_decode : pending_decodes) {
                                if (other_decode.destination == decode.destination) {
                                        ++num_pending;
                                }
                        }
                        if (num_pending > 3) {
                                cache_miss_behavior = RETURN_NULLPTR_IF_NOT_IN_CACHE;
                        }
                }

                shared_ptr<Frame> primary_frame, secondary_frame;
                bool drop = false;
                for (int subframe_idx = 0; subframe_idx < 2; ++subframe_idx) {
                        const JPEGID &id = (subframe_idx == 0 ? decode.primary : decode.secondary);
                        if (id.stream_idx == (unsigned)-1) {
                                // No secondary frame.
                                continue;
                        }

                        bool did_decode;
                        shared_ptr<Frame> frame;
                        if (id.interpolated) {
                                // Interpolated frames are never decoded by us,
                                // but put directly into the cache from VideoStream.
                                unique_lock<mutex> lock(cache_mu);
                                auto it = cache.find(id);
                                if (it != cache.end()) {
                                        it->second.last_used = event_counter++;
                                        frame = it->second.frame;
                                } else {
                                        // This can only really happen if it disappeared out of the
                                        // LRU really, really fast. Which shouldn't happen.
                                        fprintf(stderr, "WARNING: Interpolated JPEG was supposed to be in the cache, but was not\n");
                                }
                                did_decode = false;  // Don't count it as a decode.
                        } else {
                                frame = decode_jpeg_with_cache(id, cache_miss_behavior, &did_decode);
                        }

                        if (frame == nullptr) {
                                assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
                                drop = true;
                                break;
                        }

                        if (did_decode) {
                                ++num_decoded;
                                if (num_decoded % 1000 == 0) {
                                        fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
                                                num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
                                }
                        }
                        if (subframe_idx == 0) {
                                primary_frame = move(frame);
                        } else {
                                secondary_frame = move(frame);
                        }
                }
                if (drop) {
                        ++num_dropped;
                        continue;
                }

                // TODO: Could we get jitter between non-interpolated and interpolated frames here?
                decode.destination->setDecodedFrame(primary_frame, secondary_frame, decode.fade_alpha);
        }
}

void JPEGFrameView::shutdown()
{
        any_pending_decodes.notify_all();
        jpeg_decoder_thread.join();
}

JPEGFrameView::JPEGFrameView(QWidget *parent)
        : QGLWidget(parent, global_share_widget)
{
}

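// Ask the decoder thread to show the given frame in this view. For interpolated
// fades, VideoStream has already produced a single pre-faded frame, so only one
// JPEGID is requested; otherwise, an optional secondary frame plus fade_alpha
// describes a fade to be applied at display time.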
void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated, int secondary_stream_idx, int64_t secondary_pts, float fade_alpha)
{
        current_stream_idx = stream_idx;  // TODO: Does this interact with fades?

        unique_lock<mutex> lock(cache_mu);
        PendingDecode decode;
        if (interpolated && secondary_stream_idx != -1) {
                // The frame will already be faded for us, so ask for only one; we shouldn't fade it against anything.
                decode.primary = create_jpegid_for_interpolated_fade(stream_idx, pts, secondary_stream_idx, secondary_pts);
                decode.secondary = JPEGID{ (unsigned)-1, -1, /*interpolated=*/false };
        } else {
                decode.primary = JPEGID{ stream_idx, pts, interpolated };
                decode.secondary = JPEGID{ (unsigned)secondary_stream_idx, secondary_pts, /*interpolated=*/false };
        }
        decode.fade_alpha = fade_alpha;
        decode.destination = this;
        pending_decodes.push_back(decode);
        any_pending_decodes.notify_all();
}

void JPEGFrameView::insert_interpolated_frame(JPEGID id, shared_ptr<Frame> frame)
{
        // We rely on the frame not being evicted from the cache before
        // the decoder thread sees it and can display it (otherwise,
        // that thread would hang). With the byte-based cache limit
        // (CACHE_SIZE_MB), that is a reasonable assumption.
        unique_lock<mutex> lock(cache_mu);
        cache_bytes_used += frame_size(*frame);
        cache[id] = LRUFrame{ std::move(frame), event_counter++ };
        if (cache_bytes_used > size_t(CACHE_SIZE_MB) * 1024 * 1024) {
                prune_cache();
        }
}

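// Movit resources (shaders, textures) shared between all JPEGFrameView
// instances; created once, together with the decoder thread, the first time
// any view runs initializeGL().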
ResourcePool *resource_pool = nullptr;

void JPEGFrameView::initializeGL()
{
        glDisable(GL_BLEND);
        glDisable(GL_DEPTH_TEST);
        check_error();

        static once_flag once;
        call_once(once, [] {
                resource_pool = new ResourcePool;
                jpeg_decoder_thread = std::thread(jpeg_decoder_thread_func);
        });

        ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_RGBA, resource_pool));

        ImageFormat inout_format;
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;

        overlay_chain.reset(new EffectChain(overlay_base_width, overlay_base_height, resource_pool));
        overlay_input = (movit::FlatInput *)overlay_chain->add_input(new FlatInput(inout_format, FORMAT_GRAYSCALE, GL_UNSIGNED_BYTE, overlay_base_width, overlay_base_height));

        overlay_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
        overlay_chain->finalize();
}

void JPEGFrameView::resizeGL(int width, int height)
{
        check_error();
        glViewport(0, 0, width, height);
        check_error();

        // Save these, as width() and height() will lie with DPI scaling.
        gl_width = width;
        gl_height = height;
}

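// Draw the most recently decoded frame through its prepared Movit chain, then
// the text overlay (if any) in a smaller viewport of its own. The overlay image
// is re-uploaded here if set_overlay() marked it as stale.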
void JPEGFrameView::paintGL()
{
        glViewport(0, 0, gl_width, gl_height);
        if (current_frame == nullptr) {
                glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
                glClear(GL_COLOR_BUFFER_BIT);
                return;
        }

        check_error();
        current_chain->render_to_screen();

        if (overlay_image != nullptr) {
                if (overlay_input_needs_refresh) {
                        overlay_input->set_width(overlay_width);
                        overlay_input->set_height(overlay_height);
                        overlay_input->set_pixel_data(overlay_image->bits());
                }
                glViewport(gl_width - overlay_width, 0, overlay_width, overlay_height);
                overlay_chain->render_to_screen();
        }
}

void JPEGFrameView::setDecodedFrame(shared_ptr<Frame> frame, shared_ptr<Frame> secondary_frame, float fade_alpha)
{
        post_to_main_thread([this, frame, secondary_frame, fade_alpha] {
                current_frame = frame;
                current_secondary_frame = secondary_frame;

                if (secondary_frame != nullptr) {
                        current_chain = ycbcr_converter->prepare_chain_for_fade(frame, secondary_frame, fade_alpha);
                } else {
                        current_chain = ycbcr_converter->prepare_chain_for_conversion(frame);
                }
                update();
        });
}

void JPEGFrameView::mousePressEvent(QMouseEvent *event)
{
        if (event->type() == QEvent::MouseButtonPress && event->button() == Qt::LeftButton) {
                emit clicked();
        }
}

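// Render the overlay text into a grayscale QImage, scaled by the primary
// screen's device pixel ratio. The GPU upload is deferred to paintGL(), since
// there may be no current OpenGL context here.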
void JPEGFrameView::set_overlay(const string &text)
{
        if (text.empty()) {
                overlay_image.reset();
                return;
        }

        float dpr = QGuiApplication::primaryScreen()->devicePixelRatio();
        overlay_width = lrint(overlay_base_width * dpr);
        overlay_height = lrint(overlay_base_height * dpr);

        overlay_image.reset(new QImage(overlay_width, overlay_height, QImage::Format_Grayscale8));
        overlay_image->setDevicePixelRatio(dpr);
        overlay_image->fill(0);
        QPainter painter(overlay_image.get());

        painter.setPen(Qt::white);
        QFont font = painter.font();
        font.setPointSize(12);
        painter.setFont(font);

        painter.drawText(QRectF(0, 0, overlay_base_width, overlay_base_height), Qt::AlignCenter, QString::fromStdString(text));

        // Don't refresh immediately; we might not have an OpenGL context here.
        overlay_input_needs_refresh = true;
}