1 #define GL_GLEXT_PROTOTYPES
3 #include "video_widget.h"
15 #include <libavcodec/avcodec.h>
16 #include <libavformat/avformat.h>
17 #include <libavutil/avutil.h>
18 #include <libavutil/error.h>
19 #include <libavutil/frame.h>
20 #include <libavutil/imgutils.h>
21 #include <libavutil/mem.h>
22 #include <libavutil/pixfmt.h>
23 #include <libavutil/opt.h>
24 #include <libswscale/swscale.h>
31 #include <unordered_set>
33 #include "post_to_main_thread.h"
35 #include <QOpenGLFunctions>
36 #include <QWheelEvent>
37 #include <QMouseEvent>
38 #include <QMouseEvent>
39 #include <QHBoxLayout>
41 #define BUFFER_OFFSET(i) ((char *)nullptr + (i))
44 using namespace std::chrono;
48 bool is_full_range(const AVPixFmtDescriptor *desc)
50 // This is horrible, but there's no better way that I know of.
51 return (strchr(desc->name, 'j') != nullptr);
// Chooses the 8-bit planar Y'CbCr pixel format we will convert the decoded
// frame to before upload. Prefers a format matching the source's chroma
// subsampling and full/limited range; falls back to 4:4:4.
54 AVPixelFormat decide_dst_format(AVPixelFormat src_format)
56 // If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
57 // and be done with it. It's too strange to spend a lot of time on.
58 // (Let's hope there's no alpha.)
59 const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
60 if (src_desc == nullptr ||
61 src_desc->nb_components != 3 ||
62 (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
63 return AV_PIX_FMT_YUV444P;
66 // The best for us would be Cb and Cr together if possible,
67 // but FFmpeg doesn't support that except in the special case of
68 // NV12, so we need to go to planar even for the case of NV12.
69 // Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
70 // that matches in color range. (This will also include the case of
71 // the source format already being acceptable.)
72 bool src_full_range = is_full_range(src_desc);
73 const char *best_format = "yuv444p";
74 unsigned best_score = numeric_limits<unsigned>::max();
// Iterate over every pixel format FFmpeg knows about.
75 for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
77 desc = av_pix_fmt_desc_next(desc)) {
78 // Find planar Y'CbCr formats only.
79 if (desc->nb_components != 3) continue;
80 if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
81 if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
// Require the conventional Y=plane0, Cb=plane1, Cr=plane2 layout.
82 if (desc->comp[0].plane != 0 ||
83 desc->comp[1].plane != 1 ||
84 desc->comp[2].plane != 2) continue;
86 // 8-bit formats only.
87 if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
88 if (desc->comp[0].depth != 8) continue;
90 // Same or better chroma resolution only.
91 int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
92 int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
93 if (chroma_w_diff < 0 || chroma_h_diff < 0)
96 // Matching full/limited range only.
97 if (is_full_range(desc) != src_full_range)
100 // Pick something with as little excess chroma resolution as possible.
101 unsigned score = (1 << (chroma_w_diff)) << chroma_h_diff;
102 if (score < best_score) {
104 best_format = desc->name;
107 return av_get_pix_fmt(best_format);
// Drains the cross-thread command queue (pause/resume/seek) and applies the
// commands on the producer thread. All queued seeks are collapsed into a
// single combined seek. NOTE(review): the exact return-value contract is not
// visible here; callers at the play_video() loop treat a true-ish result as
// "state changed, re-evaluate" — confirm against the full source.
112 bool VideoWidget::process_queued_commands(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, int video_stream_index, bool *seeked)
114 // Process any queued commands from other threads.
115 vector<QueuedCommand> commands;
// Hold queue_mu only long enough to steal the queue contents.
117 lock_guard<mutex> lock(queue_mu);
118 swap(commands, command_queue);
121 for (const QueuedCommand &cmd : commands) {
122 switch (cmd.command) {
123 case QueuedCommand::PAUSE:
126 case QueuedCommand::RESUME:
// Re-anchor the timebase so playback resumes from "now".
128 pts_origin = last_pts;
129 start = next_frame_start = steady_clock::now();
131 case QueuedCommand::SEEK:
132 case QueuedCommand::SEEK_ABSOLUTE:
138 // Combine all seeks into one big one. (There are edge cases where this is probably
139 // subtly wrong, but we'll live with it.)
140 int64_t base_pts = last_pts;
141 int64_t relative_seek_ms = 0;
142 int64_t relative_seek_frames = 0;
143 for (const QueuedCommand &cmd : commands) {
144 if (cmd.command == QueuedCommand::SEEK) {
145 relative_seek_ms += cmd.relative_seek_ms;
146 relative_seek_frames += cmd.relative_seek_frames;
147 } else if (cmd.command == QueuedCommand::SEEK_ABSOLUTE) {
// An absolute seek resets the base and discards earlier relative seeks.
148 base_pts = av_rescale_q(cmd.seek_ms, AVRational{ 1, 1000 }, video_timebase);
149 relative_seek_ms = 0;
150 relative_seek_frames = 0;
153 int64_t relative_seek_pts = av_rescale_q(relative_seek_ms, AVRational{ 1, 1000 }, video_timebase);
154 if (relative_seek_ms != 0 && relative_seek_pts == 0) {
155 // Just to be sure rounding errors don't move us into nothingness.
156 relative_seek_pts = (relative_seek_ms > 0) ? 1 : -1;
158 int64_t goal_pts = base_pts + relative_seek_pts;
159 if (goal_pts != last_pts || relative_seek_frames < 0) {
160 avcodec_flush_buffers(video_codec_ctx);
161 queued_frames.clear();
163 // Seek to the last keyframe before this point.
164 int64_t seek_pts = goal_pts;
165 if (relative_seek_frames < 0) {
166 // If we're frame-skipping backwards, add 100 ms of slop for each frame
167 // so we're fairly certain we are able to see the ones we want.
168 seek_pts -= av_rescale_q(-relative_seek_frames, AVRational{ 1, 10 }, video_timebase);
170 av_seek_frame(format_ctx, video_stream_index, seek_pts, AVSEEK_FLAG_BACKWARD);
172 // Decode frames until EOF, or until we see something past our seek point.
173 std::deque<AVFrameWithDeleter> queue;
176 AVFrameWithDeleter frame = decode_frame(format_ctx, video_codec_ctx,
177 pathname, video_stream_index, &error);
178 if (frame == nullptr || error) {
182 int64_t frame_pts = frame->pts;
183 if (relative_seek_frames < 0) {
184 // Buffer this frame; don't display it unless we know it's the Nth-latest.
185 queue.push_back(std::move(frame));
186 if (queue.size() > uint64_t(-relative_seek_frames) + 1) {
190 if (frame_pts >= goal_pts) {
191 if (relative_seek_frames > 0) {
192 --relative_seek_frames;
194 if (relative_seek_frames < 0) {
195 // Hope we have the right amount.
196 // The rest will remain in the queue for when we play forward again.
197 frame = std::move(queue.front());
199 queued_frames = std::move(queue);
201 video_window->set_current_frame(make_video_frame(frame.get()));
203 store_pts(frame->pts);
209 // NOTE: We keep pause status as-is.
// Re-anchor wall-clock timing after the seek.
211 pts_origin = last_pts;
212 start = next_frame_start = last_frame = steady_clock::now();
216 } else if (relative_seek_frames > 0) {
217 // The base PTS is fine, we only need to skip a few frames forwards.
218 while (relative_seek_frames > 1) {
219 // Eat a frame (ignore errors).
221 decode_frame(format_ctx, video_codec_ctx, pathname, video_stream_index, &error);
222 --relative_seek_frames;
225 // Display the last one.
227 AVFrameWithDeleter frame = decode_frame(format_ctx, video_codec_ctx,
228 pathname, video_stream_index, &error);
229 if (frame == nullptr || error) {
232 video_window->set_current_frame(make_video_frame(frame.get()));
234 store_pts(frame->pts);
// Hosts the VideoWindow (a QWindow doing the actual GL rendering) inside a
// QWidget via createWindowContainer, and forwards its mouse/wheel signals to
// this widget's event handlers.
239 VideoWidget::VideoWidget(QWidget *parent)
241 video_window(new VideoWindow(this)) {
242 setLayout(new QHBoxLayout);
243 layout()->setContentsMargins(QMargins());
244 layout()->addWidget(QWidget::createWindowContainer(video_window));
// The container window gets the input events, so VideoWindow re-emits them
// as signals which we route back into the normal QWidget handlers.
247 connect(video_window, &VideoWindow::mouse_wheel, this, &VideoWidget::wheelEvent);
248 connect(video_window, &VideoWindow::mouse_pressed, this, &VideoWidget::mousePressEvent);
249 connect(video_window, &VideoWindow::mouse_released, this, &VideoWidget::mouseReleaseEvent);
250 connect(video_window, &VideoWindow::mouse_moved, this, &VideoWidget::mouseMoveEvent);
// Tears down playback. Clears the window's frame reference so its later
// destruction cannot touch our (by then destroyed) freelist.
253 VideoWidget::~VideoWidget()
257 // Qt will delete video_window for us after we're gone,
258 // so make sure its destructor does not try to mess with
259 // our freelist. The actual freelist frames will leak.
260 video_window->set_current_frame(nullptr);
// Compiles a GLSL shader of the given type, printing the info log (and, on
// failure, a line-numbered copy of the source) to stderr. Returns the shader
// object; NOTE(review): the elided tail presumably returns obj / aborts on
// failure — confirm against the full source.
263 GLuint compile_shader(const string &shader_src, GLenum type)
265 GLuint obj = glCreateShader(type);
266 const GLchar* source[] = { shader_src.data() };
267 const GLint length[] = { (GLint)shader_src.size() };
268 glShaderSource(obj, 1, source, length);
269 glCompileShader(obj);
271 GLchar info_log[4096];
272 GLsizei log_length = sizeof(info_log) - 1;
273 glGetShaderInfoLog(obj, log_length, &log_length, info_log);
// glGetShaderInfoLog does not always NUL-terminate on truncation; do it ourselves.
274 info_log[log_length] = 0;
275 if (strlen(info_log) > 0) {
276 fprintf(stderr, "Shader compile log: %s\n", info_log);
280 glGetShaderiv(obj, GL_COMPILE_STATUS, &status);
281 if (status == GL_FALSE) {
282 // Add some line numbers to easier identify compile errors.
283 string src_with_lines = "/* 1 */ ";
285 for (char ch : shader_src) {
286 src_with_lines.push_back(ch);
289 snprintf(buf, sizeof(buf), "/* %3zu */ ", ++lineno);
290 src_with_lines += buf;
294 fprintf(stderr, "Failed to compile shader:\n%s\n", src_with_lines.c_str());
// One-time GL setup: creates the three Y/Cb/Cr textures, compiles and links
// the Y'CbCr-to-RGB shader program, and creates the shared bilinear sampler.
301 void VideoWindow::initializeGL()
304 glDisable(GL_DEPTH_TEST);
305 glDepthMask(GL_FALSE);
// One R8 texture per plane: tex[0]=Y, tex[1]=Cb, tex[2]=Cr (see paintGL()).
306 glCreateTextures(GL_TEXTURE_2D, 3, tex);
308 ycbcr_vertex_shader = compile_shader(R"(
311 layout(location = 0) in vec2 position;
312 layout(location = 1) in vec2 texcoord;
317 // The result of glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0) is:
319 // 2.000 0.000 0.000 -1.000
320 // 0.000 2.000 0.000 -1.000
321 // 0.000 0.000 -2.000 -1.000
322 // 0.000 0.000 0.000 1.000
323 gl_Position = vec4(2.0 * position.x - 1.0, 2.0 * position.y - 1.0, -1.0, 1.0);
327 )", GL_VERTEX_SHADER);
328 ycbcr_fragment_shader = compile_shader(R"(
331 layout(location = 0) uniform sampler2D tex_y;
332 layout(location = 1) uniform sampler2D tex_cb;
333 layout(location = 2) uniform sampler2D tex_cr;
334 layout(location = 3) uniform vec2 cbcr_offset;
339 // Computed statically by Movit, for limited-range BT.709.
340 // (We don't check whether the input could be BT.601 or BT.2020 currently, or full-range)
341 const mat3 inv_ycbcr_matrix = mat3(
342 1.16438f, 1.16438f, 1.16438f,
343 0.0f, -0.21325f, 2.11240f,
344 1.79274f, -0.53291f, 0.0f
349 if (tc.x < 0.0 || tc.x > 1.0 || tc.y < 0.0 || tc.y > 1.0) {
350 FragColor.rgba = vec4(0.0f, 0.0f, 0.0f, 1.0f);
355 ycbcr.r = texture(tex_y, tc).r;
356 ycbcr.g = texture(tex_cb, tc + cbcr_offset).r;
357 ycbcr.b = texture(tex_cr, tc + cbcr_offset).r;
358 ycbcr -= vec3(16.0f / 255.0f, 128.0f / 255.0f, 128.0f / 255.0f);
359 FragColor.rgb = inv_ycbcr_matrix * ycbcr;
362 )", GL_FRAGMENT_SHADER);
363 ycbcr_program = glCreateProgram();
364 glAttachShader(ycbcr_program, ycbcr_vertex_shader);
365 glAttachShader(ycbcr_program, ycbcr_fragment_shader);
366 glLinkProgram(ycbcr_program);
369 glGetProgramiv(ycbcr_program, GL_LINK_STATUS, &success);
370 if (success == GL_FALSE) {
371 GLchar error_log[1024] = {0};
372 glGetProgramInfoLog(ycbcr_program, 1024, nullptr, error_log);
373 fprintf(stderr, "Error linking program: %s\n", error_log);
// Bilinear with nearest-mip selection; mipmaps are regenerated per upload in paintGL().
377 glCreateSamplers(1, &bilinear_sampler);
378 glSamplerParameteri(bilinear_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
379 glSamplerParameteri(bilinear_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
380 glSamplerParameteri(bilinear_sampler, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
381 glSamplerParameteri(bilinear_sampler, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
384 void VideoWindow::resizeGL(int w, int h)
386 glViewport(0, 0, w, h);
387 display_aspect = double(w) / h;
390 int num_levels(GLuint width, GLuint height)
393 while (width > 1 || height > 1) {
394 width = max(width / 2, 1u);
395 height = max(height / 2, 1u);
// Renders the current frame: uploads the three planes from the frame's
// persistently-mapped PBO, regenerates mipmaps, then draws a zoomed/letterboxed
// quad with the Y'CbCr conversion shader.
401 void VideoWindow::paintGL()
403 std::shared_ptr<VideoWidget::Frame> frame;
// Take a local reference so the producer thread can swap frames freely.
405 lock_guard lock(current_frame_mu);
406 frame = current_frame;
408 if (frame == nullptr) {
409 glClear(GL_COLOR_BUFFER_BIT);
413 glUseProgram(ycbcr_program);
// (Re)allocate immutable texture storage only when the frame size changes.
414 if (frame->width != last_width || frame->height != last_height) {
415 glTextureStorage2D(tex[0], num_levels(frame->width, frame->height), GL_R8, frame->width, frame->height);
417 if (frame->chroma_width != last_chroma_width || frame->chroma_height != last_chroma_height) {
418 for (GLuint num : { tex[1], tex[2] }) {
419 glTextureStorage2D(num, num_levels(frame->chroma_width, frame->chroma_height), GL_R8, frame->chroma_width, frame->chroma_height);
423 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, frame->pbo);
// The PBO is mapped with GL_MAP_FLUSH_EXPLICIT_BIT; flush once per new frame.
425 if (frame->need_flush_len > 0) {
426 glFlushMappedNamedBufferRange(frame->pbo, 0, frame->need_flush_len);
427 frame->need_flush_len = 0;
430 glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
// Planes are packed back-to-back in the PBO: Y, then Cb, then Cr.
431 glTextureSubImage2D(tex[0], 0, 0, 0, frame->width, frame->height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0));
432 glGenerateTextureMipmap(tex[0]);
434 glTextureSubImage2D(tex[1], 0, 0, 0, frame->chroma_width, frame->chroma_height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(frame->width * frame->height));
435 glGenerateTextureMipmap(tex[1]);
437 glTextureSubImage2D(tex[2], 0, 0, 0, frame->chroma_width, frame->chroma_height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(frame->width * frame->height + frame->chroma_width * frame->chroma_height));
438 glGenerateTextureMipmap(tex[2]);
440 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
442 glBindTextureUnit(0, tex[0]);
443 glBindTextureUnit(1, tex[1]);
444 glBindTextureUnit(2, tex[2]);
445 glBindSampler(0, bilinear_sampler);
446 glBindSampler(1, bilinear_sampler);
447 glBindSampler(2, bilinear_sampler);
448 glProgramUniform1i(ycbcr_program, 0, 0);
449 glProgramUniform1i(ycbcr_program, 1, 1);
450 glProgramUniform1i(ycbcr_program, 2, 2);
451 glProgramUniform2f(ycbcr_program, 3, cbcr_offset[0], -cbcr_offset[1]);
// Letterbox/pillarbox: extend the texture coordinates past [0,1] on the
// axis where the display is wider/taller than the video; the shader paints
// out-of-range texels black.
458 double video_aspect = double(frame->width) / frame->height;
459 if (display_aspect > video_aspect) {
460 double extra_width = frame->height * display_aspect - frame->width;
461 tx1 = -0.5 * extra_width / frame->width;
462 tx2 = 1.0 + 0.5 * extra_width / frame->width;
463 } else if (display_aspect < video_aspect) {
464 double extra_height = frame->width / display_aspect - frame->height;
465 ty1 = -0.5 * extra_height / frame->height;
466 ty2 = 1.0 + 0.5 * extra_height / frame->height;
// Emit the quad corners transformed by the affine zoom_matrix (row-major
// 3x3; translation in [6],[7]).
472 glVertexAttrib2f(1, tx1, ty1);
473 glVertex2f(zoom_matrix[2 * 3 + 0], zoom_matrix[2 * 3 + 1]);
476 glVertexAttrib2f(1, tx1, ty2);
477 glVertex2f(zoom_matrix[1 * 3 + 0] + zoom_matrix[2 * 3 + 0], zoom_matrix[1 * 3 + 1] + zoom_matrix[2 * 3 + 1]);
480 glVertexAttrib2f(1, tx2, ty2);
// NOTE(review): the y terms below use zoom_matrix[1*3+0] where
// zoom_matrix[0*3+1] looks intended. fixup_zoom_matrix() zeroes both
// off-diagonals, so this appears harmless in practice — confirm upstream.
481 glVertex2f(zoom_matrix[0 * 3 + 0] + zoom_matrix[1 * 3 + 0] + zoom_matrix[2 * 3 + 0],
482 zoom_matrix[1 * 3 + 0] + zoom_matrix[1 * 3 + 1] + zoom_matrix[2 * 3 + 1]);
485 glVertexAttrib2f(1, tx2, ty1);
486 glVertex2f(zoom_matrix[0 * 3 + 0] + zoom_matrix[2 * 3 + 0],
487 zoom_matrix[1 * 3 + 0] + zoom_matrix[2 * 3 + 1]);
// Publishes a new frame for paintGL() to draw; called from the producer
// thread, hence the mutex.
492 void VideoWindow::set_current_frame(shared_ptr<VideoWidget::Frame> new_frame)
495 lock_guard lock(current_frame_mu);
496 current_frame = std::move(new_frame);
// res = a * b for row-major 3x3 matrices. res must not alias a or b
// (all callers pass a distinct temporary).
void matmul3x3(const double a[9], const double b[9], double res[9])
{
	for (int row = 0; row < 3; ++row) {
		for (int col = 0; col < 3; ++col) {
			double acc = 0.0;
			for (int k = 0; k < 3; ++k) {
				acc += a[row * 3 + k] * b[k * 3 + col];
			}
			res[row * 3 + col] = acc;
		}
	}
}
// Zooms in/out around the cursor position: composes translate(-cursor) *
// scale(zoom) * translate(cursor) onto zoom_matrix.
514 void VideoWidget::wheelEvent(QWheelEvent *event)
516 int delta = event->angleDelta().y();
// Cursor position in the same normalized [0,1] space as the quad,
// with y flipped since GL has origin bottom-left.
520 double x = event->position().x() / width();
521 double y = 1.0 - event->position().y() / height();
// ~0.5% zoom per wheel unit, symmetric for both directions.
522 double zoom = delta > 0 ? pow(1.005, delta) : pow(1/1.005, -delta);
524 const double inv_translation_matrix[9] = {
529 const double scale_matrix[9] = {
534 const double translation_matrix[9] = {
539 double tmp1[9], tmp2[9];
540 matmul3x3(zoom_matrix, inv_translation_matrix, tmp1);
541 matmul3x3(tmp1, scale_matrix, tmp2);
542 matmul3x3(tmp2, translation_matrix, zoom_matrix);
545 video_window->set_zoom_matrix(zoom_matrix);
// Back/forward buttons are surfaced as signals; left button starts a drag-pan
// (drag state lines are elided here).
549 void VideoWidget::mousePressEvent(QMouseEvent *e)
551 if (e->button() == Qt::BackButton) {
552 emit mouse_back_clicked();
553 } else if (e->button() == Qt::ForwardButton) {
554 emit mouse_forward_clicked();
555 } else if (e->button() == Qt::LeftButton) {
557 last_drag_x = e->position().x();
558 last_drag_y = e->position().y();
// Ends a left-button drag-pan (the state reset itself is elided here).
562 void VideoWidget::mouseReleaseEvent(QMouseEvent *e)
564 if (e->button() == Qt::LeftButton) {
// Pans the view while dragging: applies the normalized mouse delta directly
// to the translation entries of zoom_matrix.
569 void VideoWidget::mouseMoveEvent(QMouseEvent *e)
574 float dx = (e->position().x() - last_drag_x) / width();
575 float dy = (e->position().y() - last_drag_y) / height();
// Deliberately not scaled by the zoom factor (see the commented-out lines):
// the pan speed tracks the screen, not the video.
577 //zoom_matrix[6] += dx * zoom_matrix[0];
578 //zoom_matrix[7] += dy * zoom_matrix[4];
579 zoom_matrix[6] += dx;
// y is flipped: screen y grows downwards, GL y upwards.
580 zoom_matrix[7] -= dy;
582 video_window->set_zoom_matrix(zoom_matrix);
584 last_drag_x = e->position().x();
585 last_drag_y = e->position().y();
590 // Normalize the matrix so that we never get skew or similar,
591 // and also never can zoom or pan too far out.
592 void VideoWidget::fixup_zoom_matrix()
594 // Correct for any numerical errors (we know the matrix must be orthogonal
595 // and have zero rotation).
596 zoom_matrix[4] = zoom_matrix[0];
597 zoom_matrix[1] = zoom_matrix[2] = zoom_matrix[3] = zoom_matrix[5] = 0.0;
598 zoom_matrix[8] = 1.0;
600 // We can't zoom further out than 1:1. (Perhaps it would be nice to
601 // reuse the last zoom-in point to do this, but the center will have to do
// If scale dropped below 1, zoom back up to exactly 1 (about the center;
// the matrix literals are elided in this view).
603 if (zoom_matrix[0] < 1.0) {
604 const double zoom = 1.0 / zoom_matrix[0];
605 const double inv_translation_matrix[9] = {
610 const double scale_matrix[9] = {
615 const double translation_matrix[9] = {
620 double tmp1[9], tmp2[9];
621 matmul3x3(zoom_matrix, inv_translation_matrix, tmp1);
622 matmul3x3(tmp1, scale_matrix, tmp2);
623 matmul3x3(tmp2, translation_matrix, zoom_matrix);
626 // Looking at the points we'll draw with glVertex2f(), make sure none of them are
627 // inside the square (which would generally mean we've panned ourselves out-of-bounds).
628 // We simply adjust the translation, which is possible because we fixed scaling above.
629 zoom_matrix[6] = min(zoom_matrix[6], 0.0); // Left side (x=0).
630 zoom_matrix[7] = min(zoom_matrix[7], 0.0); // Bottom side (y=0).
631 zoom_matrix[6] = std::max(zoom_matrix[6], 1.0 - zoom_matrix[0]); // Right side (x=1).
632 zoom_matrix[7] = std::max(zoom_matrix[7], 1.0 - zoom_matrix[4]); // Top side (y=1).
// Opens a new file and (in elided lines) kicks off the producer thread, then
// busy-waits until it has either started playing or failed.
// Returns false on a file/stream error.
635 bool VideoWidget::open(const string &filename)
642 while (running == STARTING) {
643 // Poor man's condition variable...
647 return (running != VIDEO_FILE_ERROR);
// Starts or resumes playback. If the producer thread is already alive, just
// queue a RESUME; otherwise (re)spawn the producer thread.
650 void VideoWidget::play()
652 if (running != NOT_RUNNING && running != VIDEO_FILE_ERROR) {
653 std::lock_guard<std::mutex> lock(queue_mu);
654 command_queue.push_back(QueuedCommand { QueuedCommand::RESUME });
655 producer_thread_should_quit.wakeup();
// Reset the quit flag and reap any finished thread before starting anew.
659 producer_thread_should_quit.unquit();
660 if (producer_thread.joinable()) {
661 producer_thread.join();
663 producer_thread = std::thread(&VideoWidget::producer_thread_func, this);
// Queues a PAUSE command for the producer thread; no-op when not playing.
666 void VideoWidget::pause()
668 if (running == NOT_RUNNING || running == VIDEO_FILE_ERROR) {
671 std::lock_guard<std::mutex> lock(queue_mu);
672 command_queue.push_back(QueuedCommand { QueuedCommand::PAUSE });
673 producer_thread_should_quit.wakeup();
// Queues a relative seek by wall-clock milliseconds (may be negative);
// no-op when not playing.
676 void VideoWidget::seek(int64_t relative_seek_ms)
678 if (running == NOT_RUNNING || running == VIDEO_FILE_ERROR) {
681 std::lock_guard<std::mutex> lock(queue_mu);
682 command_queue.push_back(QueuedCommand { QueuedCommand::SEEK, relative_seek_ms, 0, 0 });
683 producer_thread_should_quit.wakeup();
// Queues a relative seek by a number of frames (may be negative);
// no-op when not playing.
686 void VideoWidget::seek_frames(int64_t relative_seek_frames)
688 if (running == NOT_RUNNING || running == VIDEO_FILE_ERROR) {
691 std::lock_guard<std::mutex> lock(queue_mu);
692 command_queue.push_back(QueuedCommand { QueuedCommand::SEEK, 0, relative_seek_frames, 0 });
693 producer_thread_should_quit.wakeup();
// Queues an absolute seek to the given position in milliseconds;
// no-op when not playing.
696 void VideoWidget::seek_absolute(int64_t position_ms)
698 if (running == NOT_RUNNING || running == VIDEO_FILE_ERROR) {
701 std::lock_guard<std::mutex> lock(queue_mu);
702 command_queue.push_back(QueuedCommand { QueuedCommand::SEEK_ABSOLUTE, 0, 0, position_ms });
703 producer_thread_should_quit.wakeup();
// Stops playback by asking the producer thread to quit and joining it;
// no-op when not playing.
706 void VideoWidget::stop()
708 if (running == NOT_RUNNING || running == VIDEO_FILE_ERROR) {
711 producer_thread_should_quit.quit();
712 producer_thread.join();
// Entry point of the producer thread: runs the playback loop and records the
// final state (error vs. stopped) in `running`.
715 void VideoWidget::producer_thread_func()
717 if (!producer_thread_should_quit.should_quit()) {
718 if (!play_video(pathname)) {
719 running = VIDEO_FILE_ERROR;
721 running = NOT_RUNNING;
// Resets playback timing to the start of the stream (PTS 0, clock "now").
726 void VideoWidget::internal_rewind()
728 pts_origin = last_pts = 0;
730 start = next_frame_start = steady_clock::now();
// AVCodecContext::get_format callback, templatized on the hardware device
// type. Picks the hardware pixel format for `type` if the decoder offers it;
// otherwise logs why and falls back to the first software format.
733 template<AVHWDeviceType type>
734 AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
736 bool found_config_of_right_type = false;
737 for (int i = 0;; ++i) { // Termination condition inside loop.
738 const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
739 if (config == nullptr) { // End of list.
742 if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
743 config->device_type != type) {
744 // Not interesting for us.
748 // We have a config of the right type, but does it actually support
749 // the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
750 // is to just replace the pixel format with a software-decoded one,
752 found_config_of_right_type = true;
// fmt is a -1 (AV_PIX_FMT_NONE) terminated list of candidate formats.
753 for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
754 if (config->pix_fmt == *fmt_ptr) {
755 fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
756 av_hwdevice_get_type_name(type), ctx->codec->name);
757 if (ctx->profile == FF_PROFILE_H264_BASELINE) {
758 fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
759 fprintf(stderr, " Consider encoding it as Constrained Baseline, Main or High instead.\n");
760 fprintf(stderr, " Decoding might fail and fall back to software.\n");
762 return config->pix_fmt;
// The right device type exists but none of its formats were offered; log
// the deduplicated list of what the decoder did offer.
765 fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
766 unordered_set<AVPixelFormat> seen;
767 for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
768 if (!seen.count(*fmt_ptr)) {
769 fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
770 seen.insert(*fmt_ptr);
773 fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
777 if (!found_config_of_right_type) {
778 fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
781 // We found no VA-API formats, so take the first software format.
782 for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
783 if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
784 fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
789 // Fallback: Just return anything. (Should never really happen.)
// Returns the next decoded video frame: first from the post-seek queue if
// non-empty, otherwise by pumping packets into the decoder until a frame
// comes out or EOF. On decode errors, sets *error (assignment elided in this
// view) and returns nullptr; plain EOF returns nullptr without error.
793 AVFrameWithDeleter VideoWidget::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx,
794 const std::string &pathname, int video_stream_index,
// Frames buffered by a backwards frame-seek are served first, in order.
799 if (!queued_frames.empty()) {
800 AVFrameWithDeleter frame = std::move(queued_frames.front());
801 queued_frames.pop_front();
805 // Read packets until we have a frame or there are none left.
806 bool frame_finished = false;
807 AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
810 AVPacket *pkt = av_packet_alloc();
// NOTE(review): av_packet_unref only releases the packet's payload; the
// AVPacket struct from av_packet_alloc() itself is never freed here —
// av_packet_free() would release both. Looks like a small per-call leak;
// confirm against the full source.
811 unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
812 pkt, av_packet_unref);
815 if (av_read_frame(format_ctx, pkt) == 0) {
// Non-video packets (audio, subtitles) are simply dropped.
816 if (pkt->stream_index == video_stream_index) {
817 if (avcodec_send_packet(video_codec_ctx, pkt) < 0) {
818 fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
820 return AVFrameWithDeleter(nullptr);
824 eof = true; // Or error, but ignore that for the time being.
827 // Decode video, if we have a frame.
828 int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
830 frame_finished = true;
// EAGAIN just means the decoder needs more input; anything else is fatal.
832 } else if (err != AVERROR(EAGAIN)) {
833 fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
835 return AVFrameWithDeleter(nullptr);
840 return video_avframe;
842 return AVFrameWithDeleter(nullptr);
845 int find_stream_index(AVFormatContext *ctx, AVMediaType media_type)
847 for (unsigned i = 0; i < ctx->nb_streams; ++i) {
848 if (ctx->streams[i]->codecpar->codec_type == media_type) {
855 steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
857 const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
858 return origin + duration_cast<steady_clock::duration>(pts / rate);
// The producer thread's main loop: opens the file, sets up (optionally
// hardware-accelerated) decoding, then decodes, paces and presents frames
// until asked to quit. Returns false on a file/stream error.
861 bool VideoWidget::play_video(const string &pathname)
863 queued_frames.clear();
864 AVFormatContextWithCloser format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
865 /*options=*/nullptr);
866 if (format_ctx == nullptr) {
867 fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
871 if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
872 fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
876 int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
877 if (video_stream_index == -1) {
878 fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
882 // Open video decoder.
883 const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
884 const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
886 video_timebase = format_ctx->streams[video_stream_index]->time_base;
887 AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
888 if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
889 fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
892 if (video_codec == nullptr) {
893 fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
897 // Seemingly, it's not too easy to make something that just initializes
898 // “whatever goes”, so we don't get CUDA or VULKAN or whatever here
899 // without enumerating through several different types.
900 // VA-API and VDPAU will do for now. We prioritize VDPAU for the
901 // simple reason that there's a VA-API-via-VDPAU emulation for NVidia
902 // cards that seems to work, but just hangs when trying to transfer the frame.
904 // Note that we don't actually check codec support beforehand,
905 // so if you have a low-end VDPAU device but a high-end VA-API device,
906 // you lose out on the extra codec support from the latter.
// NOTE(review): av_hwdevice_ctx_create() hands us a reference which is then
// av_buffer_ref()'d again; no matching av_buffer_unref() is visible in this
// view, so the extra reference looks leaked — confirm against the full source.
907 AVBufferRef *hw_device_ctx = nullptr;
908 if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
909 video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
910 video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
911 } else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
912 video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
913 video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
915 fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
918 if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
919 fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
922 unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
923 video_codec_ctx.get(), avcodec_close);
930 int consecutive_errors = 0;
932 while (!producer_thread_should_quit.should_quit()) {
933 if (process_queued_commands(format_ctx.get(), video_codec_ctx.get(), video_stream_index, /*seeked=*/nullptr)) {
// Paused (or idle): deep-sleep until a command wakes us up.
937 producer_thread_should_quit.sleep_for(hours(1));
942 AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(),
943 pathname, video_stream_index, &error);
945 if (++consecutive_errors >= 100) {
// NOTE(review): this message appears garbled — presumably it should read
// "More than 100 consecutive video frame errors" (doc-only edit; string
// left untouched).
946 fprintf(stderr, "More than 100 consecutive video frames, aborting playback.\n");
952 consecutive_errors = 0;
954 if (frame == nullptr) {
959 // Sleep until it's time to present this frame.
961 if (last_pts == 0 && pts_origin == 0) {
962 pts_origin = frame->pts;
964 steady_clock::time_point now = steady_clock::now();
965 next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
967 if (duration<double>(now - next_frame_start).count() >= 0.1) {
968 // If we don't have enough CPU to keep up, or if we have a live stream
969 // where the initial origin was somehow wrong, we could be behind indefinitely.
970 fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
972 1e3 * duration<double>(now - next_frame_start).count());
973 pts_origin = frame->pts;
974 start = next_frame_start = now;
976 bool finished_wakeup;
977 finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
// finished_wakeup == true means we slept the full interval; false means a
// command or quit request interrupted the sleep.
978 if (finished_wakeup) {
979 video_window->set_current_frame(make_video_frame(frame.get()));
980 last_frame = steady_clock::now();
984 if (producer_thread_should_quit.should_quit()) break;
987 if (process_queued_commands(format_ctx.get(), video_codec_ctx.get(), video_stream_index, &seeked)) {
992 // Just paused, so present the frame immediately and then go into deep sleep.
993 video_window->set_current_frame(make_video_frame(frame.get()));
994 last_frame = steady_clock::now();
999 // If we just seeked, drop this frame on the floor and be done.
1005 store_pts(frame->pts);
// Records the last decoded PTS and notifies the UI (on the main thread) of
// the new position in milliseconds.
1010 void VideoWidget::store_pts(int64_t pts)
1013 last_position = lrint(pts * double(video_timebase.num) / double(video_timebase.den) * 1000);
// Capture the value now; last_position may change again before the main
// thread runs the lambda.
1014 post_to_main_thread([this, last_position{last_position.load()}] {
1015 emit position_changed(last_position);
// Taken from Movit (see the comment there for explanation): computes the
// texture-coordinate offset that centers a subsampled chroma sample
// correctly relative to luma.
float compute_chroma_offset(float pos, unsigned subsampling_factor, unsigned resolution)
{
	const float local_chroma_pos = (0.5 + pos * (subsampling_factor - 1)) / subsampling_factor;
	if (fabs(local_chroma_pos - 0.5) < 1e-10) {
		// x + (-0) can be optimized away freely, as opposed to x + 0.
		return -0.0f;
	} else {
		return (0.5 - local_chroma_pos) / resolution;
	}
}
// Returns a Frame with a persistently-mapped PBO big enough for one video
// frame (Y plane + two chroma planes), reusing a freelist entry of matching
// dimensions if available. PBO creation must happen on the main (GL) thread,
// so we post there and block until done.
1031 shared_ptr<VideoWidget::Frame> VideoWidget::alloc_frame(unsigned width, unsigned height, unsigned chroma_width, unsigned chroma_height)
1033 lock_guard lock(freelist_mu);
1034 for (auto it = frame_freelist.begin(); it != frame_freelist.end(); ++it) {
1035 if ((*it)->width == width &&
1036 (*it)->height == height &&
1037 (*it)->chroma_width == chroma_width &&
1038 (*it)->chroma_height == chroma_height) {
1040 frame_freelist.erase(it);
// Custom deleter routes the frame back to the freelist instead of freeing it.
1041 return shared_ptr<Frame>{frame, free_frame};
1045 Frame *frame = new Frame;
1046 frame->owner = this;
1047 frame->width = width;
1048 frame->height = height;
1049 frame->chroma_width = chroma_width;
1050 frame->chroma_height = chroma_height;
// One Y plane plus two chroma planes, packed back-to-back.
1052 size_t len = frame->width * frame->height + 2 * frame->chroma_width * frame->chroma_height;
// The GL context must exist before we can create buffers on it.
1054 while (!video_window->isValid()) {
1060 condition_variable done_cv;
1063 post_to_main_thread([this, &frame, len, &done, &mu, &done_cv]{
1064 video_window->makeCurrent();
1065 glCreateBuffers(1, &frame->pbo);
1066 glNamedBufferStorage(frame->pbo, len, nullptr, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
1067 frame->data = (uint8_t *)glMapNamedBufferRange(frame->pbo, 0, len, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT);
1068 video_window->doneCurrent();
1070 lock_guard lock(mu);
1072 done_cv.notify_all();
// Block the producer thread until the main thread has created the PBO.
1075 unique_lock lock(mu);
1076 done_cv.wait(lock, [&done]{ return done; });
1079 return shared_ptr<Frame>{frame, free_frame};
1082 void VideoWidget::free_frame(VideoWidget::Frame *frame)
1084 VideoWidget *self = frame->owner;
1085 lock_guard lock(self->freelist_mu);
1086 if (self->frame_freelist.size() >= 16) {
1087 GLuint pbo = frame->pbo;
1088 post_to_main_thread([self, pbo]{
1089 self->video_window->makeCurrent();
1090 glUnmapNamedBuffer(pbo);
1091 glDeleteBuffers(1, &pbo);
1092 self->video_window->doneCurrent();
1094 delete self->frame_freelist.front();
1095 self->frame_freelist.pop_front();
1097 self->frame_freelist.push_back(frame);
// Converts a decoded AVFrame (downloading it from the GPU first if it is a
// VA-API/VDPAU hardware frame) into one of our planar 8-bit Y'CbCr Frames,
// scaling/converting with swscale directly into the frame's mapped PBO.
// (The function continues past the end of this view.)
1100 shared_ptr<VideoWidget::Frame> VideoWidget::make_video_frame(const AVFrame *frame)
1102 AVFrameWithDeleter sw_frame;
1104 if (frame->format == AV_PIX_FMT_VAAPI ||
1105 frame->format == AV_PIX_FMT_VDPAU) {
1106 // Get the frame down to the CPU. (TODO: See if we can keep it
1107 // on the GPU all the way, since it will be going up again later.
1108 // However, this only works if the OpenGL GPU is the same one.)
1109 sw_frame = av_frame_alloc_unique();
1110 int err = av_hwframe_transfer_data(sw_frame.get(), frame, 0);
1112 fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
// av_hwframe_transfer_data() does not copy timing metadata; carry it over.
1114 sw_frame->pts = frame->pts;
1115 sw_frame->pkt_duration = frame->pkt_duration;
1116 frame = sw_frame.get();
// Lazily (re)create the swscale context whenever the source geometry or
// pixel format changes.
1120 if (sws_ctx == nullptr ||
1121 sws_last_width != frame->width ||
1122 sws_last_height != frame->height ||
1123 sws_last_src_format != frame->format) {
1124 sws_dst_format = decide_dst_format(AVPixelFormat(frame->format));
1126 sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
1127 frame->width, frame->height, sws_dst_format,
1128 SWS_BICUBIC, nullptr, nullptr, nullptr));
1129 sws_last_width = frame->width;
1130 sws_last_height = frame->height;
1131 sws_last_src_format = frame->format;
1133 if (sws_ctx == nullptr) {
1134 fprintf(stderr, "Could not create scaler context\n");
1138 uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
1139 int linesizes[4] = { 0, 0, 0, 0 };
1140 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
1142 shared_ptr<Frame> video_frame = alloc_frame(
1145 AV_CEIL_RSHIFT(int(frame->width), desc->log2_chroma_w),
1146 AV_CEIL_RSHIFT(int(frame->height), desc->log2_chroma_h));
1148 // We always assume left chroma placement for now.
1149 video_window->set_cbcr_offset(
1150 compute_chroma_offset(0.0f, 1 << desc->log2_chroma_w, video_frame->chroma_width),
1151 compute_chroma_offset(0.5f, 1 << desc->log2_chroma_h, video_frame->chroma_height)
// Point swscale's output planes directly into the mapped PBO:
// Y, then Cb, then Cr, tightly packed.
1154 pic_data[0] = video_frame->data;
1155 linesizes[0] = frame->width;
1157 pic_data[1] = pic_data[0] + frame->width * frame->height;
1158 linesizes[1] = video_frame->chroma_width;
1160 pic_data[2] = pic_data[1] + video_frame->chroma_width * video_frame->chroma_height;
1161 linesizes[2] = video_frame->chroma_width;
1163 sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
// Tell paintGL() how many bytes of the mapped range need an explicit flush.
1165 video_frame->need_flush_len = video_frame->width * video_frame->height + 2 * video_frame->chroma_width * video_frame->chroma_height;