#include "chroma_subsampler.h"
+#include "embedded_files.h"
+
#include <movit/util.h>
#include <string>
-#include "embedded_files.h"
-
#define BUFFER_OFFSET(i) ((char *)nullptr + (i))
using namespace std;
#include "state.pb.h"
#include <QAbstractTableModel>
-#include <stdint.h>
#include <map>
+#include <stdint.h>
#include <string>
#include <vector>
sqlite3_exec(db, R"(
CREATE TABLE IF NOT EXISTS state (state BLOB);
- )", nullptr, nullptr, nullptr); // Ignore errors.
+ )",
+ nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, R"(
CREATE TABLE IF NOT EXISTS settings (settings BLOB);
- )", nullptr, nullptr, nullptr); // Ignore errors.
+ )",
+ nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, R"(
DROP TABLE file;
- )", nullptr, nullptr, nullptr); // Ignore errors.
+ )",
+ nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, R"(
DROP TABLE frame;
- )", nullptr, nullptr, nullptr); // Ignore errors.
+ )",
+ nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, R"(
CREATE TABLE IF NOT EXISTS filev2 (
size BIGINT NOT NULL,
frames BLOB NOT NULL
);
- )", nullptr, nullptr, nullptr); // Ignore errors.
+ )",
+ nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, "PRAGMA journal_mode=WAL", nullptr, nullptr, nullptr); // Ignore errors.
sqlite3_exec(db, "PRAGMA synchronous=NORMAL", nullptr, nullptr, nullptr); // Ignore errors.
ret = sqlite3_exec(db, R"(
CREATE TEMPORARY TABLE used_filenames ( filename VARCHAR NOT NULL PRIMARY KEY )
- )", nullptr, nullptr, nullptr);
+ )",
+ nullptr, nullptr, nullptr);
if (ret != SQLITE_OK) {
fprintf(stderr, "CREATE TEMPORARY TABLE: %s\n", sqlite3_errmsg(db));
ret = sqlite3_exec(db, R"(
DELETE FROM filev2 WHERE filename NOT IN ( SELECT filename FROM used_filenames )
- )", nullptr, nullptr, nullptr);
+ )",
+ nullptr, nullptr, nullptr);
if (ret != SQLITE_OK) {
fprintf(stderr, "DELETE: %s\n", sqlite3_errmsg(db));
ret = sqlite3_exec(db, R"(
DROP TABLE used_filenames
- )", nullptr, nullptr, nullptr);
+ )",
+ nullptr, nullptr, nullptr);
if (ret != SQLITE_OK) {
fprintf(stderr, "DROP TABLE: %s\n", sqlite3_errmsg(db));
#ifndef DB_H
#define DB_H 1
+#include "frame_on_disk.h"
#include "state.pb.h"
#include <sqlite3.h>
#include <string>
#include <vector>
-#include "frame_on_disk.h"
-
class DB {
public:
explicit DB(const std::string &filename);
+#include "export.h"
+
#include "clip_list.h"
#include "defs.h"
-#include "export.h"
#include "flags.h"
#include "frame_on_disk.h"
#include "player.h"
#include <QMessageBox>
#include <QProgressDialog>
-
#include <future>
-#include <vector>
-
#include <unistd.h>
+#include <vector>
extern "C" {
#include <libavformat/avformat.h>
// Create the streams. Note that some of them could be without frames
// (we try to maintain the stream indexes in the export).
- vector<AVStream *> video_streams;
+ vector<AVStream *> video_streams;
for (unsigned stream_idx = 0; stream_idx <= last_stream_idx; ++stream_idx) {
AVStream *avstream_video = avformat_new_stream(avctx, nullptr);
if (avstream_video == nullptr) {
fprintf(stderr, "avformat_new_stream() failed\n");
exit(1);
}
- avstream_video->time_base = AVRational{1, TIMEBASE};
+ avstream_video->time_base = AVRational{ 1, TIMEBASE };
avstream_video->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
avstream_video->codecpar->codec_id = AV_CODEC_ID_MJPEG;
avstream_video->codecpar->width = global_flags.width; // Might be wrong, but doesn't matter all that much.
}
}
string jpeg = readers[first_frame_stream_idx].read_frame(first_frame);
- int64_t scaled_pts = av_rescale_q(first_frame.pts, AVRational{1, TIMEBASE},
- video_streams[first_frame_stream_idx]->time_base);
+ int64_t scaled_pts = av_rescale_q(first_frame.pts, AVRational{ 1, TIMEBASE },
+ video_streams[first_frame_stream_idx]->time_base);
buffered_jpegs.emplace_back(BufferedJPEG{ scaled_pts, first_frame_stream_idx, std::move(jpeg) });
if (buffered_jpegs.size() >= 1000) {
if (!write_buffered_jpegs(avctx, buffered_jpegs)) {
if (progress.wasCanceled()) {
unlink(filename.c_str());
return;
- }
+ }
}
if (!write_buffered_jpegs(avctx, buffered_jpegs)) {
progress.setMaximum(100000);
progress.setValue(0);
- double total_length = compute_time_left(clips, {{0, 0.0}});
+ double total_length = compute_time_left(clips, { { 0, 0.0 } });
promise<void> done_promise;
future<void> done = done_promise.get_future();
- std::atomic<double> current_value{0.0};
+ std::atomic<double> current_value{ 0.0 };
size_t clip_idx = 0;
Player player(/*destination=*/nullptr, Player::FILE_STREAM_OUTPUT, closer.release());
done_promise.set_value();
}
});
- player.set_progress_callback([&current_value, &clips, total_length] (const std::map<size_t, double> &player_progress) {
+ player.set_progress_callback([&current_value, &clips, total_length](const std::map<size_t, double> &player_progress) {
current_value = 1.0 - compute_time_left(clips, player_progress) / total_length;
});
player.play(clips);
#ifndef _EXPORT_H
#define _EXPORT_H 1
+#include "clip_list.h"
+
#include <string>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-
#include <utility>
using namespace std;
fprintf(stderr, " --tally-url URL URL to get tally color from (polled every 100 ms)\n");
}
-void parse_flags(int argc, char * const argv[])
+void parse_flags(int argc, char *const argv[])
{
static const option long_options[] = {
{ "help", no_argument, 0, OPTION_HELP },
{ "cue-point-padding", required_argument, 0, OPTION_CUE_POINT_PADDING },
{ 0, 0, 0, 0 }
};
- for ( ;; ) {
+ for (;;) {
int option_index = 0;
int c = getopt_long(argc, argv, "w:h:r:q:d:", long_options, &option_index);
#ifndef _FLAGS_H
#define _FLAGS_H
-#include <string>
-
#include "defs.h"
+#include <string>
+
struct Flags {
int width = 1280, height = 720;
std::string stream_source;
extern int flow_initialized_interpolation_quality;
void usage();
-void parse_flags(int argc, char * const argv[]);
+void parse_flags(int argc, char *const argv[]);
#endif // !defined(_FLAGS_H)
#include "embedded_files.h"
#include "gpu_timers.h"
-#include "util.h"
#include "shared/read_file.h"
+#include "util.h"
#include <algorithm>
#include <assert.h>
int find_num_levels(int width, int height)
{
int levels = 1;
- for (int w = width, h = height; w > 1 || h > 1; ) {
+ for (int w = width, h = height; w > 1 || h > 1;) {
w >>= 1;
h >>= 1;
++levels;
GLint success;
glGetProgramiv(program, GL_LINK_STATUS, &success);
if (success == GL_FALSE) {
- GLchar error_log[1024] = {0};
+ GLchar error_log[1024] = { 0 };
glGetProgramInfoLog(program, 1024, nullptr, error_log);
fprintf(stderr, "Error linking program: %s\n", error_log);
exit(1);
bind_sampler(densify_program, uniform_flow_tex, 1, flow_tex, nearest_sampler);
glProgramUniform2f(densify_program, uniform_patch_size,
- float(op.patch_size_pixels) / level_width,
- float(op.patch_size_pixels) / level_height);
+ float(op.patch_size_pixels) / level_width,
+ float(op.patch_size_pixels) / level_height);
glViewport(0, 0, level_width, level_height);
glEnable(GL_BLEND);
: flow_level(op.finest_level),
split_ycbcr_output(split_ycbcr_output),
splat(op),
- blend(split_ycbcr_output) {
+ blend(split_ycbcr_output)
+{
// Set up the vertex data that will be shared between all passes.
float vertices[] = {
0.0f, 1.0f,
void render_to(const std::array<GLuint, num_elements> &textures);
// Convenience wrappers.
- void render_to(GLuint texture0) {
- render_to({{texture0}});
+ void render_to(GLuint texture0)
+ {
+ render_to({ { texture0 } });
}
- void render_to(GLuint texture0, GLuint texture1) {
- render_to({{texture0, texture1}});
+ void render_to(GLuint texture0, GLuint texture1)
+ {
+ render_to({ { texture0, texture1 } });
}
- void render_to(GLuint texture0, GLuint texture1, GLuint texture2) {
- render_to({{texture0, texture1, texture2}});
+ void render_to(GLuint texture0, GLuint texture1, GLuint texture2)
+ {
+ render_to({ { texture0, texture1, texture2 } });
}
- void render_to(GLuint texture0, GLuint texture1, GLuint texture2, GLuint texture3) {
- render_to({{texture0, texture1, texture2, texture3}});
+ void render_to(GLuint texture0, GLuint texture1, GLuint texture2, GLuint texture3)
+ {
+ render_to({ { texture0, texture1, texture2, texture3 } });
}
private:
void render_to(GLuint depth_rb, const std::array<GLuint, num_elements> &textures);
// Convenience wrappers.
- void render_to(GLuint depth_rb, GLuint texture0) {
- render_to(depth_rb, {{texture0}});
+ void render_to(GLuint depth_rb, GLuint texture0)
+ {
+ render_to(depth_rb, { { texture0 } });
}
- void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1) {
- render_to(depth_rb, {{texture0, texture1}});
+ void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1)
+ {
+ render_to(depth_rb, { { texture0, texture1 } });
}
- void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1, GLuint texture2) {
- render_to(depth_rb, {{texture0, texture1, texture2}});
+ void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1, GLuint texture2)
+ {
+ render_to(depth_rb, { { texture0, texture1, texture2 } });
}
- void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1, GLuint texture2, GLuint texture3) {
- render_to(depth_rb, {{texture0, texture1, texture2, texture3}});
+ void render_to(GLuint depth_rb, GLuint texture0, GLuint texture1, GLuint texture2, GLuint texture3)
+ {
+ render_to(depth_rb, { { texture0, texture1, texture2, texture3 } });
}
private:
static constexpr int num_channels = 4;
};
-template <class Type>
+template<class Type>
void finish_one_read(GLuint width, GLuint height)
{
using T = typename Type::type;
}
}
-template <class Type>
+template<class Type>
void schedule_read(GLuint tex, GLuint width, GLuint height, const char *filename0, const char *filename1, const char *flow_filename, const char *ppm_filename)
{
using T = typename Type::type;
if (width1 != width2 || height1 != height2) {
fprintf(stderr, "Image dimensions don't match (%dx%d versus %dx%d)\n",
- width1, height1, width2, height2);
+ width1, height1, width2, height2);
exit(1);
}
GLuint tex0 = load_texture(filename0, &width, &height, WITHOUT_MIPMAPS);
if (width != width1 || height != height1) {
fprintf(stderr, "%s: Image dimensions don't match (%dx%d versus %dx%d)\n",
- filename0, width, height, width1, height1);
+ filename0, width, height, width1, height1);
exit(1);
}
glCopyImageSubData(tex0, GL_TEXTURE_2D, 0, 0, 0, 0, image_tex, GL_TEXTURE_2D_ARRAY, 0, 0, 0, 0, width1, height1, 1);
GLuint tex1 = load_texture(filename1, &width, &height, WITHOUT_MIPMAPS);
if (width != width1 || height != height1) {
fprintf(stderr, "%s: Image dimensions don't match (%dx%d versus %dx%d)\n",
- filename1, width, height, width1, height1);
+ filename1, width, height, width1, height1);
exit(1);
}
glCopyImageSubData(tex1, GL_TEXTURE_2D, 0, 0, 0, 0, image_tex, GL_TEXTURE_2D_ARRAY, 0, 0, 0, 1, width1, height1, 1);
if (width1 != width2 || height1 != height2) {
fprintf(stderr, "Image dimensions don't match (%dx%d versus %dx%d)\n",
- width1, height1, width2, height2);
+ width1, height1, width2, height2);
exit(1);
}
enable_timing = true;
- for ( ;; ) {
+ for (;;) {
int option_index = 0;
int c = getopt_long(argc, argv, "s:i:g:", long_options, &option_index);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 5);
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_DEBUG_FLAG);
window = SDL_CreateWindow("OpenGL window",
- SDL_WINDOWPOS_UNDEFINED,
- SDL_WINDOWPOS_UNDEFINED,
- 64, 64,
- SDL_WINDOW_OPENGL | SDL_WINDOW_HIDDEN);
+ SDL_WINDOWPOS_UNDEFINED,
+ SDL_WINDOWPOS_UNDEFINED,
+ 64, 64,
+ SDL_WINDOW_OPENGL | SDL_WINDOW_HIDDEN);
SDL_GLContext context = SDL_GL_CreateContext(window);
assert(context != nullptr);
-#include <fcntl.h>
-#include <unistd.h>
+#include "frame_on_disk.h"
+
+#include "shared/metrics.h"
#include <atomic>
#include <chrono>
+#include <fcntl.h>
#include <mutex>
-
-#include "frame_on_disk.h"
-#include "shared/metrics.h"
+#include <unistd.h>
using namespace std;
using namespace std::chrono;
// There can be multiple FrameReader classes, so make all the metrics static.
once_flag frame_metrics_inited;
-atomic<int64_t> metric_frame_opened_files{0};
-atomic<int64_t> metric_frame_closed_files{0};
-atomic<int64_t> metric_frame_read_bytes{0};
-atomic<int64_t> metric_frame_read_frames{0};
+atomic<int64_t> metric_frame_opened_files{ 0 };
+atomic<int64_t> metric_frame_closed_files{ 0 };
+atomic<int64_t> metric_frame_read_bytes{ 0 };
+atomic<int64_t> metric_frame_read_frames{ 0 };
Summary metric_frame_read_time_seconds;
FrameReader::FrameReader()
{
- call_once(frame_metrics_inited, []{
+ call_once(frame_metrics_inited, [] {
global_metrics.add("frame_opened_files", &metric_frame_opened_files);
global_metrics.add("frame_closed_files", &metric_frame_closed_files);
global_metrics.add("frame_read_bytes", &metric_frame_read_bytes);
global_metrics.add("frame_read_frames", &metric_frame_read_frames);
- vector<double> quantiles{0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99};
+ vector<double> quantiles{ 0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99 };
metric_frame_read_time_seconds.init(quantiles, 60.0);
global_metrics.add("frame_read_time_seconds", &metric_frame_read_time_seconds);
});
#ifndef _FRAME_ON_DISK_H
#define _FRAME_ON_DISK_H 1
+#include "defs.h"
+
#include <algorithm>
#include <mutex>
+#include <stdint.h>
#include <string>
#include <vector>
-#include <stdint.h>
-
-#include "defs.h"
-
extern std::mutex frame_mu;
struct FrameOnDisk {
- int64_t pts = -1; // -1 means empty.
- off_t offset;
- unsigned filename_idx;
- uint32_t size; // Not using size_t saves a few bytes; we can have so many frames.
+ int64_t pts = -1; // -1 means empty.
+ off_t offset;
+ unsigned filename_idx;
+ uint32_t size; // Not using size_t saves a few bytes; we can have so many frames.
};
extern std::vector<FrameOnDisk> frames[MAX_STREAMS]; // Under frame_mu.
extern std::vector<std::string> frame_filenames; // Under frame_mu.
find_last_frame_before(std::vector<FrameOnDisk> &frames, int64_t pts_origin)
{
return std::lower_bound(frames.begin(), frames.end(), pts_origin,
- [](const FrameOnDisk &frame, int64_t pts) { return frame.pts < pts; });
+ [](const FrameOnDisk &frame, int64_t pts) { return frame.pts < pts; });
}
inline std::vector<FrameOnDisk>::iterator
find_first_frame_at_or_after(std::vector<FrameOnDisk> &frames, int64_t pts_origin)
{
return std::upper_bound(frames.begin(), frames.end(), pts_origin - 1,
- [](int64_t pts, const FrameOnDisk &frame) { return pts < frame.pts; });
+ [](int64_t pts, const FrameOnDisk &frame) { return pts < frame.pts; });
}
#endif // !defined(_FRAME_ON_DISK_H)
JPEGDestroyer(jpeg_decompress_struct *dinfo)
: dinfo(dinfo) {}
- ~JPEGDestroyer() {
+ ~JPEGDestroyer()
+ {
jpeg_destroy_decompress(dinfo);
}
struct Frame {
bool is_semiplanar = false;
std::unique_ptr<uint8_t[]> y;
- std::unique_ptr<uint8_t[]> cb, cr; // For planar.
+ std::unique_ptr<uint8_t[]> cb, cr; // For planar.
std::unique_ptr<uint8_t[]> cbcr; // For semiplanar.
unsigned width, height;
unsigned chroma_subsampling_x, chroma_subsampling_y;
unsigned pitch_y, pitch_chroma;
};
-#endif // !defined(_JPEG_FRAME_H)
+#endif // !defined(_JPEG_FRAME_H)
namespace {
// Just an arbitrary order for std::map.
-struct FrameOnDiskLexicalOrder
-{
- bool operator() (const FrameOnDisk &a, const FrameOnDisk &b) const
+struct FrameOnDiskLexicalOrder {
+ bool operator()(const FrameOnDisk &a, const FrameOnDisk &b) const
{
if (a.pts != b.pts)
return a.pts < b.pts;
// There can be multiple JPEGFrameView instances, so make all the metrics static.
once_flag jpeg_metrics_inited;
-atomic<int64_t> metric_jpeg_cache_used_bytes{0}; // Same value as cache_bytes_used.
-atomic<int64_t> metric_jpeg_cache_limit_bytes{size_t(CACHE_SIZE_MB) * 1024 * 1024};
-atomic<int64_t> metric_jpeg_cache_given_up_frames{0};
-atomic<int64_t> metric_jpeg_cache_hit_frames{0};
-atomic<int64_t> metric_jpeg_cache_miss_frames{0};
-atomic<int64_t> metric_jpeg_software_decode_frames{0};
-atomic<int64_t> metric_jpeg_software_fail_frames{0};
-atomic<int64_t> metric_jpeg_vaapi_decode_frames{0};
-atomic<int64_t> metric_jpeg_vaapi_fail_frames{0};
+atomic<int64_t> metric_jpeg_cache_used_bytes{ 0 }; // Same value as cache_bytes_used.
+atomic<int64_t> metric_jpeg_cache_limit_bytes{ size_t(CACHE_SIZE_MB) * 1024 * 1024 };
+atomic<int64_t> metric_jpeg_cache_given_up_frames{ 0 };
+atomic<int64_t> metric_jpeg_cache_hit_frames{ 0 };
+atomic<int64_t> metric_jpeg_cache_miss_frames{ 0 };
+atomic<int64_t> metric_jpeg_software_decode_frames{ 0 };
+atomic<int64_t> metric_jpeg_software_fail_frames{ 0 };
+atomic<int64_t> metric_jpeg_vaapi_decode_frames{ 0 };
+atomic<int64_t> metric_jpeg_vaapi_fail_frames{ 0 };
} // namespace
size_t cache_bytes_used = 0; // Under cache_mu.
condition_variable any_pending_decodes;
deque<PendingDecode> pending_decodes; // Under cache_mu.
-atomic<size_t> event_counter{0};
+atomic<size_t> event_counter{ 0 };
extern QGLWidget *global_share_widget;
extern atomic<bool> should_quit;
jpeg_decompress_struct dinfo;
JPEGWrapErrorManager error_mgr(&dinfo);
- if (!error_mgr.run([&dinfo]{ jpeg_create_decompress(&dinfo); })) {
+ if (!error_mgr.run([&dinfo] { jpeg_create_decompress(&dinfo); })) {
return get_black_frame();
}
JPEGDestroyer destroy_dinfo(&dinfo);
- if (!error_mgr.run([&dinfo, &jpeg]{
- jpeg_mem_src(&dinfo, reinterpret_cast<const unsigned char *>(jpeg.data()), jpeg.size());
- jpeg_read_header(&dinfo, true);
- })) {
+ if (!error_mgr.run([&dinfo, &jpeg] {
+ jpeg_mem_src(&dinfo, reinterpret_cast<const unsigned char *>(jpeg.data()), jpeg.size());
+ jpeg_read_header(&dinfo, true);
+ })) {
return get_black_frame();
}
if (dinfo.num_components != 3) {
fprintf(stderr, "Not a color JPEG. (%d components, Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
- dinfo.num_components,
- dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
- dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
- dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
+ dinfo.num_components,
+ dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
+ dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
+ dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
return get_black_frame();
}
if (dinfo.comp_info[0].h_samp_factor != dinfo.max_h_samp_factor ||
(dinfo.max_h_samp_factor % dinfo.comp_info[1].h_samp_factor) != 0 ||
(dinfo.max_v_samp_factor % dinfo.comp_info[1].v_samp_factor) != 0) { // No 2:3 subsampling or other weirdness.
fprintf(stderr, "Unsupported subsampling scheme. (Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
- dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
- dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
- dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
+ dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
+ dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
+ dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
exit(1);
}
dinfo.raw_data_out = true;
- if (!error_mgr.run([&dinfo]{
- jpeg_start_decompress(&dinfo);
- })) {
+ if (!error_mgr.run([&dinfo] {
+ jpeg_start_decompress(&dinfo);
+ })) {
return get_black_frame();
}
frame->pitch_chroma = chroma_width_blocks * DCTSIZE;
if (!error_mgr.run([&dinfo, &frame, v_mcu_size, mcu_height_blocks] {
- JSAMPROW yptr[v_mcu_size], cbptr[v_mcu_size], crptr[v_mcu_size];
- JSAMPARRAY data[3] = { yptr, cbptr, crptr };
- for (unsigned y = 0; y < mcu_height_blocks; ++y) {
- // NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
- for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
- yptr[yy] = frame->y.get() + (y * DCTSIZE * dinfo.max_v_samp_factor + yy) * frame->pitch_y;
- cbptr[yy] = frame->cb.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
- crptr[yy] = frame->cr.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
- }
-
- jpeg_read_raw_data(&dinfo, data, v_mcu_size);
- }
-
- (void)jpeg_finish_decompress(&dinfo);
- })) {
+ JSAMPROW yptr[v_mcu_size], cbptr[v_mcu_size], crptr[v_mcu_size];
+ JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+ for (unsigned y = 0; y < mcu_height_blocks; ++y) {
+ // NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
+ for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
+ yptr[yy] = frame->y.get() + (y * DCTSIZE * dinfo.max_v_samp_factor + yy) * frame->pitch_y;
+ cbptr[yy] = frame->cb.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
+ crptr[yy] = frame->cr.get() + (y * DCTSIZE * dinfo.comp_info[1].v_samp_factor + yy) * frame->pitch_chroma;
+ }
+
+ jpeg_read_raw_data(&dinfo, data, v_mcu_size);
+ }
+
+ (void)jpeg_finish_decompress(&dinfo);
+ })) {
return get_black_frame();
}
{
// Assumes cache_mu is held.
int64_t bytes_still_to_remove = cache_bytes_used - (size_t(CACHE_SIZE_MB) * 1024 * 1024) * 9 / 10;
- if (bytes_still_to_remove <= 0) return;
+ if (bytes_still_to_remove <= 0)
+ return;
vector<pair<size_t, size_t>> lru_timestamps_and_size;
for (const auto &key_and_value : cache) {
for (const pair<size_t, size_t> &it : lru_timestamps_and_size) {
lru_cutoff_point = it.first;
bytes_still_to_remove -= it.second;
- if (bytes_still_to_remove <= 0) break;
+ if (bytes_still_to_remove <= 0)
+ break;
}
- for (auto it = cache.begin(); it != cache.end(); ) {
+ for (auto it = cache.begin(); it != cache.end();) {
if (it->second.last_used <= lru_cutoff_point) {
cache_bytes_used -= frame_size(*it->second.frame);
metric_jpeg_cache_used_bytes = cache_bytes_used;
++num_decoded;
if (num_decoded % 1000 == 0) {
fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
- num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
+ num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
}
}
if (subframe_idx == 0) {
JPEGFrameView::JPEGFrameView(QWidget *parent)
: QGLWidget(parent, global_share_widget)
{
- call_once(jpeg_metrics_inited, []{
+ call_once(jpeg_metrics_inited, [] {
global_metrics.add("jpeg_cache_used_bytes", &metric_jpeg_cache_used_bytes, Metrics::TYPE_GAUGE);
global_metrics.add("jpeg_cache_limit_bytes", &metric_jpeg_cache_limit_bytes, Metrics::TYPE_GAUGE);
- global_metrics.add("jpeg_cache_frames", {{ "action", "given_up" }}, &metric_jpeg_cache_given_up_frames);
- global_metrics.add("jpeg_cache_frames", {{ "action", "hit" }}, &metric_jpeg_cache_hit_frames);
- global_metrics.add("jpeg_cache_frames", {{ "action", "miss" }}, &metric_jpeg_cache_miss_frames);
- global_metrics.add("jpeg_decode_frames", {{ "decoder", "software" }, { "result", "decode" }}, &metric_jpeg_software_decode_frames);
- global_metrics.add("jpeg_decode_frames", {{ "decoder", "software" }, { "result", "fail" }}, &metric_jpeg_software_fail_frames);
- global_metrics.add("jpeg_decode_frames", {{ "decoder", "vaapi" }, { "result", "decode" }}, &metric_jpeg_vaapi_decode_frames);
- global_metrics.add("jpeg_decode_frames", {{ "decoder", "vaapi" }, { "result", "fail" }}, &metric_jpeg_vaapi_fail_frames);
+ global_metrics.add("jpeg_cache_frames", { { "action", "given_up" } }, &metric_jpeg_cache_given_up_frames);
+ global_metrics.add("jpeg_cache_frames", { { "action", "hit" } }, &metric_jpeg_cache_hit_frames);
+ global_metrics.add("jpeg_cache_frames", { { "action", "miss" } }, &metric_jpeg_cache_miss_frames);
+ global_metrics.add("jpeg_decode_frames", { { "decoder", "software" }, { "result", "decode" } }, &metric_jpeg_software_decode_frames);
+ global_metrics.add("jpeg_decode_frames", { { "decoder", "software" }, { "result", "fail" } }, &metric_jpeg_software_fail_frames);
+ global_metrics.add("jpeg_decode_frames", { { "decoder", "vaapi" }, { "result", "decode" } }, &metric_jpeg_vaapi_decode_frames);
+ global_metrics.add("jpeg_decode_frames", { { "decoder", "vaapi" }, { "result", "fail" } }, &metric_jpeg_vaapi_fail_frames);
});
}
-#include <assert.h>
#include <arpa/inet.h>
+#include <assert.h>
#include <atomic>
#include <chrono>
#include <condition_variable>
}
#include "clip_list.h"
-#include "shared/context.h"
#include "defs.h"
-#include "shared/disk_space_estimator.h"
-#include "shared/ffmpeg_raii.h"
#include "flags.h"
-#include "frame_on_disk.h"
#include "frame.pb.h"
-#include "shared/httpd.h"
+#include "frame_on_disk.h"
#include "mainwindow.h"
#include "player.h"
+#include "shared/context.h"
+#include "shared/disk_space_estimator.h"
+#include "shared/ffmpeg_raii.h"
+#include "shared/httpd.h"
+#include "shared/metrics.h"
#include "shared/post_to_main_thread.h"
#include "shared/ref_counted_gl_sync.h"
#include "shared/timebase.h"
-#include "shared/metrics.h"
#include "ui_mainwindow.h"
#include "vaapi_jpeg_decoder.h"
#include <QApplication>
#include <QGLFormat>
-#include <QSurfaceFormat>
#include <QProgressDialog>
+#include <QSurfaceFormat>
#include <movit/init.h>
#include <movit/util.h>
constexpr size_t frame_magic_len = 8;
mutex RefCountedGLsync::fence_lock;
-atomic<bool> should_quit{false};
+atomic<bool> should_quit{ false };
int64_t start_pts = -1;
vector<FrameOnDisk> frames[MAX_STREAMS]; // Under frame_mu.
vector<string> frame_filenames; // Under frame_mu.
-atomic<int64_t> metric_received_frames[MAX_STREAMS]{{0}};
+atomic<int64_t> metric_received_frames[MAX_STREAMS]{ { 0 } };
Summary metric_received_frame_size_bytes;
namespace {
if (open_frame_files.count(stream_idx) == 0) {
char filename[256];
snprintf(filename, sizeof(filename), "%s/frames/cam%d-pts%09ld.frames",
- global_flags.working_directory.c_str(), stream_idx, pts);
+ global_flags.working_directory.c_str(), stream_idx, pts);
FILE *fp = fopen(filename, "wb");
if (fp == nullptr) {
perror(filename);
return frame;
}
-} // namespace
+} // namespace
HTTPD *global_httpd;
// OK, found the magic. Try to parse the frame header.
magic_offset = 0;
- if (skipped_bytes > 0) {
+ if (skipped_bytes > 0) {
fprintf(stderr, "WARNING: %s: Skipped %zu garbage bytes in the middle.\n",
- filename, skipped_bytes);
+ filename, skipped_bytes);
skipped_bytes = 0;
}
if (skipped_bytes > 0) {
fprintf(stderr, "WARNING: %s: Skipped %zu garbage bytes at the end.\n",
- filename, skipped_bytes);
+ filename, skipped_bytes);
}
off_t size = ftell(fp);
}
vector<string> frame_basenames;
- for ( ;; ) {
+ for (;;) {
errno = 0;
dirent *de = readdir(dir);
if (de == nullptr) {
for (int stream_idx = 0; stream_idx < MAX_STREAMS; ++stream_idx) {
sort(frames[stream_idx].begin(), frames[stream_idx].end(),
- [](const auto &a, const auto &b) { return a.pts < b.pts; });
+ [](const auto &a, const auto &b) { return a.pts < b.pts; });
}
db.clean_unused_frame_files(frame_basenames);
void record_thread_func()
{
for (unsigned i = 0; i < MAX_STREAMS; ++i) {
- global_metrics.add("received_frames", {{ "stream", to_string(i) }}, &metric_received_frames[i]);
+ global_metrics.add("received_frames", { { "stream", to_string(i) } }, &metric_received_frames[i]);
}
global_metrics.add("received_frame_size_bytes", &metric_received_frame_size_bytes);
while (!should_quit.load()) {
AVPacket pkt;
- unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
+ unique_ptr<AVPacket, decltype(av_packet_unref) *> pkt_cleanup(
&pkt, av_packet_unref);
av_init_packet(&pkt);
pkt.data = nullptr;
#include "mainwindow.h"
-#include "shared/aboutdialog.h"
#include "clip_list.h"
#include "export.h"
-#include "shared/disk_space_estimator.h"
#include "flags.h"
#include "frame_on_disk.h"
#include "player.h"
+#include "shared/aboutdialog.h"
+#include "shared/disk_space_estimator.h"
#include "shared/post_to_main_thread.h"
#include "shared/timebase.h"
#include "ui_mainwindow.h"
});
// TODO: support drag-and-drop.
- connect(ui->playlist_move_up_btn, &QPushButton::clicked, [this]{ playlist_move(-1); });
- connect(ui->playlist_move_down_btn, &QPushButton::clicked, [this]{ playlist_move(1); });
+ connect(ui->playlist_move_up_btn, &QPushButton::clicked, [this] { playlist_move(-1); });
+ connect(ui->playlist_move_down_btn, &QPushButton::clicked, [this] { playlist_move(1); });
connect(ui->playlist->selectionModel(), &QItemSelectionModel::selectionChanged,
- this, &MainWindow::playlist_selection_changed);
+ this, &MainWindow::playlist_selection_changed);
playlist_selection_changed(); // First time set-up.
preview_player.reset(new Player(ui->preview_display, Player::NO_STREAM_OUTPUT));
live_player.reset(new Player(ui->live_display, Player::HTTPD_STREAM_OUTPUT));
- live_player->set_done_callback([this]{
- post_to_main_thread([this]{
+ live_player->set_done_callback([this] {
+ post_to_main_thread([this] {
live_player_clip_done();
});
});
ui->undo_action->setEnabled(true);
connect(ui->clip_list->selectionModel(), &QItemSelectionModel::currentChanged,
- this, &MainWindow::clip_list_selection_changed);
+ this, &MainWindow::clip_list_selection_changed);
// Find out how many cameras we have in the existing frames;
// if none, we start with two cameras.
QShortcut *shortcut = new QShortcut(QKeySequence(Qt::Key_1 + i), this);
connect(shortcut, &QShortcut::activated, preview_btn, &QPushButton::click);
- connect(preview_btn, &QPushButton::clicked, [this, i]{ preview_angle_clicked(i); });
+ connect(preview_btn, &QPushButton::clicked, [this, i] { preview_angle_clicked(i); });
}
cliplist_clips->change_num_cameras(num_cameras);
if (selected->hasSelection()) {
QModelIndex index = selected->currentIndex();
const Clip &clip = *playlist_clips->clip(index.row());
- preview_player->play({clip});
+ preview_player->play({ clip });
return;
}
}
QItemSelectionModel *selected = ui->clip_list->selectionModel();
if (!selected->hasSelection()) {
- preview_player->play({*cliplist_clips->back()});
+ preview_player->play({ *cliplist_clips->back() });
return;
}
} else {
clip.stream_idx = ui->preview_display->get_stream_idx();
}
- preview_player->play({clip});
+ preview_player->play({ clip });
}
void MainWindow::preview_angle_clicked(unsigned stream_idx)
clips.push_back(*playlist_clips->clip(row));
}
live_player->play(clips);
- playlist_clips->set_progress({{ start_row, 0.0f }});
+ playlist_clips->set_progress({ { start_row, 0.0f } });
playlist_clips->set_currently_playing(start_row, 0.0f);
playlist_selection_changed();
size_t last_row = playlist_clips->size() - 1;
playlist_clips->set_currently_playing(last_row, 0.0f);
live_player_index_to_row.clear();
- live_player->play({fake_clip});
+ live_player->play({ fake_clip });
}
void MainWindow::live_player_clip_done()
playlist_clips->set_progress({});
playlist_clips->set_currently_playing(-1, 0.0f);
} else {
- playlist_clips->set_progress({{ row + 1, 0.0f }});
+ playlist_clips->set_progress({ { row + 1, 0.0f } });
playlist_clips->set_currently_playing(row + 1, 0.0f);
}
ui->stop_btn->setEnabled(false);
}
int column = destination->columnAt(wheel->x());
int row = destination->rowAt(wheel->y());
- if (column == -1 || row == -1) return false;
+ if (column == -1 || row == -1)
+ return false;
// Only adjust pts with the wheel if the given row is selected.
if (!destination->hasFocus() ||
currently_deferring_model_changes = true;
{
current_change_id = (watched == ui->clip_list->viewport()) ? "cliplist:" : "playlist:";
- ClipProxy clip = (watched == ui->clip_list->viewport()) ?
- cliplist_clips->mutable_clip(row) : playlist_clips->mutable_clip(row);
+ ClipProxy clip = (watched == ui->clip_list->viewport()) ? cliplist_clips->mutable_clip(row) : playlist_clips->mutable_clip(row);
if (watched == ui->playlist->viewport()) {
stream_idx = clip->stream_idx;
}
Clip fake_clip;
fake_clip.pts_in = pts;
fake_clip.pts_out = pts + 1;
- preview_player->play({fake_clip});
+ preview_player->play({ fake_clip });
}
void MainWindow::playlist_selection_changed()
for (size_t row = 0; row < playlist_clips->size(); ++row) {
clips.push_back(*playlist_clips->clip(row));
}
- double remaining = compute_time_left(clips, {{selected->selectedRows().front().row(), 0.0}});
+ double remaining = compute_time_left(clips, { { selected->selectedRows().front().row(), 0.0 } });
set_output_status(format_duration(remaining) + " ready");
}
}
msgbox.setText(QString::fromStdString(
"The interpolation quality for the main output cannot be changed at runtime, "
"except being turned completely off; it will take effect for exported files "
- "only until next restart. The live output quality thus remains at " + to_string(flow_initialized_interpolation_quality) + "."));
+ "only until next restart. The live output quality thus remains at " +
+ to_string(flow_initialized_interpolation_quality) + "."));
msgbox.exec();
}
queue_status = status;
}
-pair<string, string> MainWindow::get_queue_status() const {
+pair<string, string> MainWindow::get_queue_status() const
+{
lock_guard<mutex> lock(queue_status_mu);
- return {queue_status, "text/plain"};
+ return { queue_status, "text/plain" };
}
void MainWindow::display_frame(unsigned stream_idx, const FrameOnDisk &frame)
return;
}
if (stream_idx >= num_cameras) {
- post_to_main_thread_and_wait([this, stream_idx]{
+ post_to_main_thread_and_wait([this, stream_idx] {
num_cameras = stream_idx + 1;
change_num_cameras();
});
displays[stream_idx].display->setFrame(stream_idx, frame);
}
-template <class Model>
+template<class Model>
void MainWindow::replace_model(QTableView *view, Model **model, Model *new_model)
{
QItemSelectionModel *old_selection_model = view->selectionModel();
unsigned time_to_next_tally_ms;
if (http_reply->error()) {
fprintf(stderr, "HTTP get of '%s' failed: %s\n", global_flags.tally_url.c_str(),
- http_reply->errorString().toStdString().c_str());
+ http_reply->errorString().toStdString().c_str());
ui->live_frame->setStyleSheet("");
time_to_next_tally_ms = 1000;
} else {
#include "db.h"
#include "state.pb.h"
-#include <deque>
-#include <memory>
-#include <mutex>
#include <QLabel>
#include <QMainWindow>
#include <QNetworkAccessManager>
+#include <deque>
+#include <memory>
+#include <mutex>
#include <stdbool.h>
-#include <sys/types.h>
#include <string>
+#include <sys/types.h>
#include <utility>
namespace Ui {
int64_t scrub_pts_origin;
// Which element (e.g. pts_in on clip 4) we are scrubbing.
- enum ScrubType { SCRUBBING_CLIP_LIST, SCRUBBING_PLAYLIST } scrub_type;
+ enum ScrubType { SCRUBBING_CLIP_LIST,
+ SCRUBBING_PLAYLIST } scrub_type;
int scrub_row;
int scrub_column;
void state_changed(const StateProto &state); // Called post-filtering.
void save_settings();
- enum Rounding { FIRST_AT_OR_AFTER, LAST_BEFORE };
+ enum Rounding { FIRST_AT_OR_AFTER,
+ LAST_BEFORE };
void preview_single_frame(int64_t pts, unsigned stream_idx, Rounding rounding);
// Also covers when the playlist itself changes.
void highlight_camera_input(int stream_idx);
- template <class Model>
+ template<class Model>
void replace_model(QTableView *view, Model **model, Model *new_model);
void start_tally();
#include "player.h"
#include "clip_list.h"
-#include "shared/context.h"
#include "defs.h"
-#include "shared/ffmpeg_raii.h"
#include "flags.h"
#include "frame_on_disk.h"
-#include "shared/httpd.h"
#include "jpeg_frame_view.h"
+#include "shared/context.h"
+#include "shared/ffmpeg_raii.h"
+#include "shared/httpd.h"
#include "shared/metrics.h"
#include "shared/mux.h"
#include "shared/timebase.h"
steady_clock::duration time_behind = steady_clock::now() - next_frame_start;
if (stream_output != FILE_STREAM_OUTPUT && time_behind >= milliseconds(200)) {
fprintf(stderr, "WARNING: %ld ms behind, dropping a frame (no matter the type).\n",
- lrint(1e3 * duration<double>(time_behind).count()));
+ lrint(1e3 * duration<double>(time_behind).count()));
++metric_dropped_unconditional_frame;
continue;
}
-
// pts not affected by the swapping below.
int64_t in_pts_for_progress = in_pts, in_pts_secondary_for_progress = -1;
if (progress_callback != nullptr) {
// NOTE: None of this will take into account any snapping done below.
- map<size_t, double> progress{{ clip_idx, calc_progress(clip, in_pts_for_progress) }};
+ map<size_t, double> progress{ { clip_idx, calc_progress(clip, in_pts_for_progress) } };
if (next_clip != nullptr && time_left_this_clip <= next_clip_fade_time) {
progress[clip_idx + 1] = calc_progress(*next_clip, in_pts_secondary_for_progress);
}
unique_lock<mutex> lock(queue_state_mu);
if (video_stream == nullptr) {
// No queue, just wait until the right time and then show the frame.
- new_clip_changed.wait_until(lock, next_frame_start, [this]{
+ new_clip_changed.wait_until(lock, next_frame_start, [this] {
return should_quit || new_clip_ready || override_stream_idx != -1;
});
if (should_quit) {
//
// In this case, we don't sleep until next_frame_start; the displaying is
// done by the queue.
- new_clip_changed.wait(lock, [this]{
+ new_clip_changed.wait(lock, [this] {
if (num_queued_frames < max_queued_frames) {
return true;
}
for (FrameOnDisk snap_frame : { frame_lower, frame_upper }) {
if (fabs(snap_frame.pts - in_pts) < pts_snap_tolerance) {
display_single_frame(primary_stream_idx, snap_frame, secondary_stream_idx,
- secondary_frame, fade_alpha, next_frame_start, /*snapped=*/true);
+ secondary_frame, fade_alpha, next_frame_start, /*snapped=*/true);
in_pts_origin += snap_frame.pts - in_pts;
snapped = true;
break;
// decorrelated with no common factor, of course (e.g. 12.345 → 34.567, which we should
// really never see in practice).
for (double fraction : { 1.0 / 2.0, 1.0 / 3.0, 2.0 / 3.0, 1.0 / 4.0, 3.0 / 4.0,
- 1.0 / 5.0, 2.0 / 5.0, 3.0 / 5.0, 4.0 / 5.0 }) {
+ 1.0 / 5.0, 2.0 / 5.0, 3.0 / 5.0, 4.0 / 5.0 }) {
double subsnap_pts = frame_lower.pts + fraction * (frame_upper.pts - frame_lower.pts);
if (fabs(subsnap_pts - in_pts) < pts_snap_tolerance) {
in_pts_origin += lrint(subsnap_pts) - in_pts;
if (stream_output != FILE_STREAM_OUTPUT && time_behind >= milliseconds(100)) {
fprintf(stderr, "WARNING: %ld ms behind, dropping an interpolated frame.\n",
- lrint(1e3 * duration<double>(time_behind).count()));
+ lrint(1e3 * duration<double>(time_behind).count()));
++metric_dropped_interpolated_frame;
continue;
}
void Player::display_single_frame(int primary_stream_idx, const FrameOnDisk &primary_frame, int secondary_stream_idx, const FrameOnDisk &secondary_frame, double fade_alpha, steady_clock::time_point frame_start, bool snapped)
{
- auto display_func = [this, primary_stream_idx, primary_frame, secondary_frame, fade_alpha]{
+ auto display_func = [this, primary_stream_idx, primary_frame, secondary_frame, fade_alpha] {
if (destination != nullptr) {
destination->setFrame(primary_stream_idx, primary_frame, secondary_frame, fade_alpha);
}
++metric_faded_frame;
}
video_stream->schedule_faded_frame(frame_start, pts, display_func,
- QueueSpotHolder(this), primary_frame,
- secondary_frame, fade_alpha);
+ QueueSpotHolder(this), primary_frame,
+ secondary_frame, fade_alpha);
}
}
last_pts_played = primary_frame.pts;
player_thread = thread(&Player::thread_func, this, file_avctx);
if (stream_output == HTTPD_STREAM_OUTPUT) {
- global_metrics.add("http_output_frames", {{ "type", "original" }, { "reason", "edge_frame_or_no_interpolation" }}, &metric_original_frame);
- global_metrics.add("http_output_frames", {{ "type", "faded" }, { "reason", "edge_frame_or_no_interpolation" }}, &metric_faded_frame);
- global_metrics.add("http_output_frames", {{ "type", "original" }, { "reason", "snapped" }}, &metric_original_snapped_frame);
- global_metrics.add("http_output_frames", {{ "type", "faded" }, { "reason", "snapped" }}, &metric_faded_snapped_frame);
- global_metrics.add("http_output_frames", {{ "type", "interpolated" }}, &metric_interpolated_frame);
- global_metrics.add("http_output_frames", {{ "type", "interpolated_faded" }}, &metric_interpolated_faded_frame);
- global_metrics.add("http_output_frames", {{ "type", "refresh" }}, &metric_refresh_frame);
- global_metrics.add("http_dropped_frames", {{ "type", "interpolated" }}, &metric_dropped_interpolated_frame);
- global_metrics.add("http_dropped_frames", {{ "type", "unconditional" }}, &metric_dropped_unconditional_frame);
+ global_metrics.add("http_output_frames", { { "type", "original" }, { "reason", "edge_frame_or_no_interpolation" } }, &metric_original_frame);
+ global_metrics.add("http_output_frames", { { "type", "faded" }, { "reason", "edge_frame_or_no_interpolation" } }, &metric_faded_frame);
+ global_metrics.add("http_output_frames", { { "type", "original" }, { "reason", "snapped" } }, &metric_original_snapped_frame);
+ global_metrics.add("http_output_frames", { { "type", "faded" }, { "reason", "snapped" } }, &metric_faded_snapped_frame);
+ global_metrics.add("http_output_frames", { { "type", "interpolated" } }, &metric_interpolated_frame);
+ global_metrics.add("http_output_frames", { { "type", "interpolated_faded" } }, &metric_interpolated_faded_frame);
+ global_metrics.add("http_output_frames", { { "type", "refresh" } }, &metric_refresh_frame);
+ global_metrics.add("http_dropped_frames", { { "type", "interpolated" } }, &metric_dropped_interpolated_frame);
+ global_metrics.add("http_dropped_frames", { { "type", "unconditional" } }, &metric_dropped_unconditional_frame);
}
}
bool find_surrounding_frames(int64_t pts, int stream_idx, FrameOnDisk *frame_lower, FrameOnDisk *frame_upper);
std::thread player_thread;
- std::atomic<bool> should_quit{false};
+ std::atomic<bool> should_quit{ false };
JPEGFrameView *destination;
done_callback_func done_callback;
std::mutex queue_state_mu;
std::condition_variable new_clip_changed;
- std::vector<Clip> queued_clip_list; // Under queue_state_mu.
+ std::vector<Clip> queued_clip_list; // Under queue_state_mu.
bool new_clip_ready = false; // Under queue_state_mu.
bool playing = false; // Under queue_state_mu.
int override_stream_idx = -1; // Under queue_state_mu.
std::unique_ptr<VideoStream> video_stream; // Can be nullptr.
- std::atomic<int64_t> metric_dropped_interpolated_frame{0};
- std::atomic<int64_t> metric_dropped_unconditional_frame{0};
- std::atomic<int64_t> metric_faded_frame{0};
- std::atomic<int64_t> metric_faded_snapped_frame{0};
- std::atomic<int64_t> metric_original_frame{0};
- std::atomic<int64_t> metric_original_snapped_frame{0};
- std::atomic<int64_t> metric_refresh_frame{0};
- std::atomic<int64_t> metric_interpolated_frame{0};
- std::atomic<int64_t> metric_interpolated_faded_frame{0};
+ std::atomic<int64_t> metric_dropped_interpolated_frame{ 0 };
+ std::atomic<int64_t> metric_dropped_unconditional_frame{ 0 };
+ std::atomic<int64_t> metric_faded_frame{ 0 };
+ std::atomic<int64_t> metric_faded_snapped_frame{ 0 };
+ std::atomic<int64_t> metric_original_frame{ 0 };
+ std::atomic<int64_t> metric_original_snapped_frame{ 0 };
+ std::atomic<int64_t> metric_refresh_frame{ 0 };
+ std::atomic<int64_t> metric_interpolated_frame{ 0 };
+ std::atomic<int64_t> metric_interpolated_faded_frame{ 0 };
// under queue_state_mu. Part of this instead of VideoStream so that we own
// its lock and can sleep on it.
class QueueSpotHolder {
public:
- QueueSpotHolder() : queue(nullptr) {}
+ QueueSpotHolder()
+ : queue(nullptr) {}
- explicit QueueSpotHolder(QueueInterface *queue) : queue(queue) {
+ explicit QueueSpotHolder(QueueInterface *queue)
+ : queue(queue)
+ {
queue->take_queue_spot();
}
- QueueSpotHolder(QueueSpotHolder &&other) : queue(other.queue) {
+ QueueSpotHolder(QueueSpotHolder &&other)
+ : queue(other.queue)
+ {
other.queue = nullptr;
}
- QueueSpotHolder &operator=(QueueSpotHolder &&other) {
+ QueueSpotHolder &operator=(QueueSpotHolder &&other)
+ {
queue = other.queue;
other.queue = nullptr;
return *this;
}
- ~QueueSpotHolder() {
+ ~QueueSpotHolder()
+ {
if (queue != nullptr) {
queue->release_queue_spot();
}
QueueSpotHolder &operator=(QueueSpotHolder &) = delete;
private:
- QueueInterface *queue;
+ QueueInterface *queue;
};
-#endif // !defined(_QUEUE_SPOT_HOLDER)
+#endif // !defined(_QUEUE_SPOT_HOLDER)
static list<VAResources> va_resources_freelist;
static mutex va_resources_mutex;
-#define CHECK_VASTATUS(va_status, func) \
- if (va_status != VA_STATUS_SUCCESS) { \
- fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
- exit(1); \
- }
-
-#define CHECK_VASTATUS_RET(va_status, func) \
- if (va_status != VA_STATUS_SUCCESS) { \
- fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
- return nullptr; \
- }
+#define CHECK_VASTATUS(va_status, func) \
+ if (va_status != VA_STATUS_SUCCESS) { \
+ fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
+ exit(1); \
+ }
+
+#define CHECK_VASTATUS_RET(va_status, func) \
+ if (va_status != VA_STATUS_SUCCESS) { \
+ fprintf(stderr, "%s:%d (%s) failed with %d\n", __func__, __LINE__, func, va_status); \
+ return nullptr; \
+ }
// From libjpeg (although it's of course identical between implementations).
static const int jpeg_natural_order[DCTSIZE2] = {
ret.height = height;
VAStatus va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422,
- width, height,
- &ret.surface, 1, nullptr, 0);
+ width, height,
+ &ret.surface, 1, nullptr, 0);
CHECK_VASTATUS(va_status, "vaCreateSurfaces");
va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
glob_t g;
int err = glob("/dev/dri/renderD*", 0, nullptr, &g);
if (err != 0) {
- fprintf(stderr, "Couldn't list render nodes (%s) when trying to autodetect a replacement.\n", strerror(errno));
+ fprintf(stderr, "Couldn't list render nodes (%s) when trying to autodetect a replacement.\n", strerror(errno));
} else {
for (size_t i = 0; i < g.gl_pathc; ++i) {
string path = g.gl_pathv[i];
va_dpy = try_open_va(path, nullptr);
if (va_dpy != nullptr) {
fprintf(stderr, "Autodetected %s as a suitable replacement; using it.\n",
- path.c_str());
+ path.c_str());
globfree(&g);
if (need_env_reset) {
unsetenv("LIBVA_MESSAGING_LEVEL");
VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
VAStatus va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointVLD,
- &attr, 1, &config_id);
+ &attr, 1, &config_id);
CHECK_VASTATUS(va_status, "vaCreateConfig");
int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
VABufferDestroyer(VADisplay dpy, VABufferID buf)
: dpy(dpy), buf(buf) {}
- ~VABufferDestroyer() {
+ ~VABufferDestroyer()
+ {
VAStatus va_status = vaDestroyBuffer(dpy, buf);
CHECK_VASTATUS(va_status, "vaDestroyBuffer");
}
if (dinfo.num_components != 3) {
fprintf(stderr, "Not a color JPEG. (%d components, Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
- dinfo.num_components,
- dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
- dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
- dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
+ dinfo.num_components,
+ dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
+ dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
+ dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
return nullptr;
}
if (dinfo.comp_info[0].h_samp_factor != 2 ||
dinfo.comp_info[2].h_samp_factor != 1 ||
dinfo.comp_info[2].v_samp_factor != dinfo.comp_info[0].v_samp_factor) {
fprintf(stderr, "Not 4:2:2. (Y=%dx%d, Cb=%dx%d, Cr=%dx%d)\n",
- dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
- dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
- dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
+ dinfo.comp_info[0].h_samp_factor, dinfo.comp_info[0].v_samp_factor,
+ dinfo.comp_info[1].h_samp_factor, dinfo.comp_info[1].v_samp_factor,
+ dinfo.comp_info[2].h_samp_factor, dinfo.comp_info[2].v_samp_factor);
return nullptr;
}
}
#include "chroma_subsampler.h"
-#include "shared/context.h"
#include "flags.h"
#include "flow.h"
-#include "shared/httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
-#include "shared/mux.h"
#include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"
size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
- AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+ AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
encode_thread = thread(&VideoStream::encode_thread_func, this);
}
unique_lock<mutex> lock(queue_lock);
// Wait until we have a frame to play.
- queue_changed.wait(lock, [this]{
+ queue_changed.wait(lock, [this] {
return !frame_queue.empty() || should_quit;
});
if (should_quit) {
if (output_fast_forward) {
aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
} else {
- aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
+ aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
});
}
#include "frame_on_disk.h"
#include "jpeg_frame_view.h"
-#include "shared/ref_counted_gl_sync.h"
#include "queue_spot_holder.h"
+#include "shared/ref_counted_gl_sync.h"
#include <atomic>
#include <chrono>
FrameOnDisk frame1, FrameOnDisk frame2,
float fade_alpha);
void schedule_interpolated_frame(std::chrono::steady_clock::time_point, int64_t output_pts,
- std::function<void(std::shared_ptr<Frame>)> &&display_func,
- QueueSpotHolder &&queue_spot_holder,
- FrameOnDisk frame1, FrameOnDisk frame2,
- float alpha, FrameOnDisk secondary_frame = {}, // Empty = no secondary (fade) frame.
- float fade_alpha = 0.0f);
+ std::function<void(std::shared_ptr<Frame>)> &&display_func,
+ QueueSpotHolder &&queue_spot_holder,
+ FrameOnDisk frame1, FrameOnDisk frame2,
+ float alpha, FrameOnDisk secondary_frame = {}, // Empty = no secondary (fade) frame.
+ float fade_alpha = 0.0f);
void schedule_refresh_frame(std::chrono::steady_clock::time_point, int64_t output_pts,
std::function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder);
void encode_thread_func();
std::thread encode_thread;
- std::atomic<bool> should_quit{false};
+ std::atomic<bool> should_quit{ false };
static int write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
static constexpr size_t num_interpolate_slots = 15; // Should be larger than Player::max_queued_frames, or we risk mass-dropping frames.
struct IFRReleaser {
- void operator() (InterpolatedFrameResources *ifr) const
+ void operator()(InterpolatedFrameResources *ifr) const
{
if (ifr != nullptr) {
std::lock_guard<std::mutex> lock(ifr->owner->queue_lock);
fade_chain.chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool));
fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input(
new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height,
- first_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+ first_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input(
new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height,
- second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+ second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect(
new MixEffect, fade_chain.input[0], fade_chain.input[1]);
setup_outputs(output_mode, inout_format, ycbcr_output_format, fade_chain.chain.get());
ycbcr_format.chroma_subsampling_x = 1;
fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input(
new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height,
- YCBCR_INPUT_INTERLEAVED));
+ YCBCR_INPUT_INTERLEAVED));
ycbcr_format.chroma_subsampling_x = 2;
fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input(
new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height,
- second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+ second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect(
new MixEffect, fade_chain.input[0], fade_chain.input[1]);