X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=jpeg_frame_view.cpp;h=905249a0cfe9e2b7e33c7e4b91b280e7201144f4;hb=215cdc3dced101eaecb864eb33ffe5c4865a0bb4;hp=ef1cded69d9dc98fcf4d26864c6ac3eef76bd667;hpb=cf158af1c2219bd9f5a9bc531fb3c1133d327b45;p=nageru

diff --git a/jpeg_frame_view.cpp b/jpeg_frame_view.cpp
index ef1cded..905249a 100644
--- a/jpeg_frame_view.cpp
+++ b/jpeg_frame_view.cpp
@@ -22,6 +22,7 @@
 #include "post_to_main_thread.h"
 #include "vaapi_jpeg_decoder.h"
 #include "video_stream.h"
+#include "ycbcr_converter.h"
 
 using namespace movit;
 using namespace std;
@@ -44,12 +45,20 @@ struct LRUFrame {
 	size_t last_used;
 };
 
+struct PendingDecode {
+	JPEGID primary, secondary;
+	float fade_alpha;  // Irrelevant if secondary.stream_idx == -1.
+	JPEGFrameView *destination;
+};
+
+thread JPEGFrameView::jpeg_decoder_thread;
 mutex cache_mu;
 map<JPEGID, LRUFrame> cache;  // Under cache_mu.
 condition_variable any_pending_decodes, cache_updated;
-deque<pair<JPEGID, JPEGFrameView *>> pending_decodes;  // Under cache_mu.
+deque<PendingDecode> pending_decodes;  // Under cache_mu.
 atomic<size_t> event_counter{0};
 extern QGLWidget *global_share_widget;
+extern atomic<bool> should_quit;
 
 shared_ptr<Frame> decode_jpeg(const string &filename)
 {
@@ -193,27 +202,26 @@ shared_ptr<Frame> decode_jpeg_with_cache(JPEGID id, CacheMissBehavior cache_miss
 	return frame;
 }
 
-void jpeg_decoder_thread()
+void jpeg_decoder_thread_func()
 {
 	size_t num_decoded = 0, num_dropped = 0;
 
 	pthread_setname_np(pthread_self(), "JPEGDecoder");
-	for ( ;; ) {
-		JPEGID id;
-		JPEGFrameView *dest;
+	while (!should_quit.load()) {
+		PendingDecode decode;
 		CacheMissBehavior cache_miss_behavior = DECODE_IF_NOT_IN_CACHE;
 
 		{
			unique_lock<mutex> lock(cache_mu);  // TODO: Perhaps under another lock?
 			any_pending_decodes.wait(lock, [] {
-				return !pending_decodes.empty();
+				return !pending_decodes.empty() || should_quit.load();
 			});
-			id = pending_decodes.front().first;
-			dest = pending_decodes.front().second;
+			if (should_quit.load()) break;
+			decode = pending_decodes.front();
 			pending_decodes.pop_front();
 
 			size_t num_pending = 0;
-			for (const pair<JPEGID, JPEGFrameView *> &decode : pending_decodes) {
-				if (decode.second == dest) {
+			for (const PendingDecode &other_decode : pending_decodes) {
+				if (other_decode.destination == decode.destination) {
 					++num_pending;
 				}
 			}
@@ -222,68 +230,103 @@ void jpeg_decoder_thread()
 			}
 		}
 
-		bool found_in_cache;
-		shared_ptr<Frame> frame;
-		if (id.interpolated) {
-			// Interpolated frames are never decoded by us,
-			// put directly into the cache from VideoStream.
-			unique_lock<mutex> lock(cache_mu);
-			cache_updated.wait(lock, [id] {
-				return cache.count(id) != 0;
-			});
-			found_in_cache = true;  // Don't count it as a decode.
+		shared_ptr<Frame> primary_frame, secondary_frame;
+		bool drop = false;
+		for (int subframe_idx = 0; subframe_idx < 2; ++subframe_idx) {
+			const JPEGID &id = (subframe_idx == 0 ? decode.primary : decode.secondary);
+			if (id.stream_idx == (unsigned)-1) {
+				// No secondary frame.
+				continue;
+			}
 
-			auto it = cache.find(id);
-			assert(it != cache.end());
+			bool found_in_cache;
+			shared_ptr<Frame> frame;
+			if (id.interpolated) {
+				// Interpolated frames are never decoded by us,
+				// put directly into the cache from VideoStream.
+				unique_lock<mutex> lock(cache_mu);
+				cache_updated.wait(lock, [id] {
+					return cache.count(id) != 0 || should_quit.load();
+				});
+				if (should_quit.load()) break;
+				found_in_cache = true;  // Don't count it as a decode.
+
+				auto it = cache.find(id);
+				assert(it != cache.end());
+
+				it->second.last_used = event_counter++;
+				frame = it->second.frame;
+				if (frame == nullptr) {
+					// We inserted a nullptr as signal that the frame was never
+					// interpolated and that we should stop waiting.
+					// But don't let it linger in the cache anymore.
+					cache.erase(it);
+				}
+			} else {
+				frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
+			}
 
-			it->second.last_used = event_counter++;
-			frame = it->second.frame;
 			if (frame == nullptr) {
-				// We inserted a nullptr as signal that the frame was never
-				// interpolated and that we should stop waiting.
-				// But don't let it linger in the cache anymore.
-				cache.erase(it);
+				assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
+				drop = true;
+				break;
 			}
-		} else {
-			frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
-		}
 
-		if (frame == nullptr) {
-			assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
+			if (!found_in_cache) {
+				++num_decoded;
+				if (num_decoded % 1000 == 0) {
+					fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
+						num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
+				}
+			}
+			if (subframe_idx == 0) {
+				primary_frame = move(frame);
+			} else {
+				secondary_frame = move(frame);
+			}
+		}
+		if (drop) {
 			++num_dropped;
 			continue;
 		}
 
-		if (!found_in_cache) {
-			++num_decoded;
-			if (num_decoded % 1000 == 0) {
-				fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
-					num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
-			}
-		}
-
 		// TODO: Could we get jitter between non-interpolated and interpolated frames here?
-		dest->setDecodedFrame(frame);
+		decode.destination->setDecodedFrame(primary_frame, secondary_frame, decode.fade_alpha);
 	}
 }
 
+void JPEGFrameView::shutdown()
+{
+	any_pending_decodes.notify_all();
+	jpeg_decoder_thread.join();
+}
+
 JPEGFrameView::JPEGFrameView(QWidget *parent)
 	: QGLWidget(parent, global_share_widget) {
 }
 
-void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated)
+void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated, int secondary_stream_idx, int64_t secondary_pts, float fade_alpha)
 {
-	current_stream_idx = stream_idx;
+	current_stream_idx = stream_idx;  // TODO: Does this interact with fades?
 
 	unique_lock<mutex> lock(cache_mu);
-	pending_decodes.emplace_back(JPEGID{ stream_idx, pts, interpolated }, this);
+	PendingDecode decode;
+	if (interpolated && secondary_stream_idx != -1) {
+		// The frame will already be faded for us, so ask for only one; we shouldn't fade it against anything.
+		decode.primary = create_jpegid_for_interpolated_fade(stream_idx, pts, secondary_stream_idx, secondary_pts);
+		decode.secondary = JPEGID{ (unsigned)-1, -1, /*interpolated=*/false };
+	} else {
+		decode.primary = JPEGID{ stream_idx, pts, interpolated };
+		decode.secondary = JPEGID{ (unsigned)secondary_stream_idx, secondary_pts, /*interpolated=*/false };
+	}
+	decode.fade_alpha = fade_alpha;
+	decode.destination = this;
+	pending_decodes.push_back(decode);
 	any_pending_decodes.notify_all();
 }
 
-void JPEGFrameView::insert_interpolated_frame(unsigned stream_idx, int64_t pts, shared_ptr<Frame> frame)
+void JPEGFrameView::insert_interpolated_frame(JPEGID id, shared_ptr<Frame> frame)
 {
-	JPEGID id{ stream_idx, pts, true };
-
 	// We rely on the frame not being evicted from the cache before
 	// jpeg_decoder_thread() sees it and can display it (otherwise,
 	// that thread would hang). With a default cache of 1000 elements,
@@ -304,37 +347,15 @@ void JPEGFrameView::initializeGL()
 	static once_flag once;
 	call_once(once, [] {
 		resource_pool = new ResourcePool;
-		std::thread(&jpeg_decoder_thread).detach();
+		jpeg_decoder_thread = std::thread(jpeg_decoder_thread_func);
 	});
 
+	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_RGBA, resource_pool));
+
 	ImageFormat inout_format;
 	inout_format.color_space = COLORSPACE_sRGB;
 	inout_format.gamma_curve = GAMMA_sRGB;
 
-	ycbcr_format.luma_coefficients = YCBCR_REC_709;
-	ycbcr_format.full_range = false;
-	ycbcr_format.num_levels = 256;
-	ycbcr_format.chroma_subsampling_x = 2;
-	ycbcr_format.chroma_subsampling_y = 1;
-	ycbcr_format.cb_x_position = 0.0f;  // H.264 -- _not_ JPEG, even though our input is MJPEG-encoded
-	ycbcr_format.cb_y_position = 0.5f;  // Irrelevant.
-	ycbcr_format.cr_x_position = 0.0f;
-	ycbcr_format.cr_y_position = 0.5f;
-
-	// Planar Y'CbCr decoding chain.
-	planar_chain.reset(new EffectChain(1280, 720, resource_pool));
-	ycbcr_planar_input = (movit::YCbCrInput *)planar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_PLANAR));
-	planar_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
-	planar_chain->set_dither_bits(8);
-	planar_chain->finalize();
-
-	// Semiplanar Y'CbCr decoding chain (for images coming from VA-API).
-	semiplanar_chain.reset(new EffectChain(1280, 720, resource_pool));
-	ycbcr_semiplanar_input = (movit::YCbCrInput *)semiplanar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_SPLIT_Y_AND_CBCR));
-	semiplanar_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
-	semiplanar_chain->set_dither_bits(8);
-	semiplanar_chain->finalize();
-
 	overlay_chain.reset(new EffectChain(overlay_base_width, overlay_base_height, resource_pool));
 	overlay_input = (movit::FlatInput *)overlay_chain->add_input(new FlatInput(inout_format, FORMAT_GRAYSCALE, GL_UNSIGNED_BYTE, overlay_base_width, overlay_base_height));
 
@@ -363,11 +384,7 @@ void JPEGFrameView::paintGL()
 	}
 	check_error();
 
-	if (current_frame->is_semiplanar) {
-		semiplanar_chain->render_to_screen();
-	} else {
-		planar_chain->render_to_screen();
-	}
+	current_chain->render_to_screen();
 
 	if (overlay_image != nullptr) {
 		if (overlay_input_needs_refresh) {
@@ -380,31 +397,21 @@
 	}
 }
 
-void JPEGFrameView::setDecodedFrame(std::shared_ptr<Frame> frame)
+namespace {
+
+
+}  // namespace
+
+void JPEGFrameView::setDecodedFrame(shared_ptr<Frame> frame, shared_ptr<Frame> secondary_frame, float fade_alpha)
 {
-	post_to_main_thread([this, frame] {
+	post_to_main_thread([this, frame, secondary_frame, fade_alpha] {
 		current_frame = frame;
-		ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
-		ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;
-
-		if (frame->is_semiplanar) {
-			ycbcr_semiplanar_input->change_ycbcr_format(ycbcr_format);
-			ycbcr_semiplanar_input->set_width(frame->width);
-			ycbcr_semiplanar_input->set_height(frame->height);
-			ycbcr_semiplanar_input->set_pixel_data(0, frame->y.get());
-			ycbcr_semiplanar_input->set_pixel_data(1, frame->cbcr.get());
-			ycbcr_semiplanar_input->set_pitch(0, frame->pitch_y);
-			ycbcr_semiplanar_input->set_pitch(1, frame->pitch_chroma);
+		current_secondary_frame = secondary_frame;
+
+		if (secondary_frame != nullptr) {
+			current_chain = ycbcr_converter->prepare_chain_for_fade(frame, secondary_frame, fade_alpha);
 		} else {
-			ycbcr_planar_input->change_ycbcr_format(ycbcr_format);
-			ycbcr_planar_input->set_width(frame->width);
-			ycbcr_planar_input->set_height(frame->height);
-			ycbcr_planar_input->set_pixel_data(0, frame->y.get());
-			ycbcr_planar_input->set_pixel_data(1, frame->cb.get());
-			ycbcr_planar_input->set_pixel_data(2, frame->cr.get());
-			ycbcr_planar_input->set_pitch(0, frame->pitch_y);
-			ycbcr_planar_input->set_pitch(1, frame->pitch_chroma);
-			ycbcr_planar_input->set_pitch(2, frame->pitch_chroma);
+			current_chain = ycbcr_converter->prepare_chain_for_conversion(frame);
 		}
 		update();
 	});