}
#include "chroma_subsampler.h"
-#include "shared/context.h"
+#include "exif_parser.h"
#include "flags.h"
#include "flow.h"
-#include "shared/httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
-#include "shared/mux.h"
+#include "pbo_pool.h"
#include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/metrics.h"
+#include "shared/shared_defs.h"
+#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"
#include <jpeglib.h>
#include <unistd.h>
+using namespace movit;
using namespace std;
using namespace std::chrono;
+namespace {
+
+once_flag video_metrics_inited;
+Summary metric_jpeg_encode_time_seconds;
+Summary metric_fade_latency_seconds;
+Summary metric_interpolation_latency_seconds;
+Summary metric_fade_fence_wait_time_seconds;
+Summary metric_interpolation_fence_wait_time_seconds;
+
+void wait_for_upload(shared_ptr<Frame> &frame)
+{
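+	// The texture may still be on its way up to the GPU. If so, insert a wait
+	// on the GL server side; glWaitSync blocks the GPU command stream, not us.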
+ if (frame->uploaded_interpolation != nullptr) {
+ glWaitSync(frame->uploaded_interpolation.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+ frame->uploaded_interpolation.reset();
+ }
+}
+
+} // namespace
+
extern HTTPD *global_httpd;
struct VectorDestinationManager {
jpeg_destination_mgr pub;
- std::vector<uint8_t> dest;
+ string dest;
VectorDestinationManager()
{
{
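+		// libjpeg has filled the buffer; grow it and point the destination
+		// manager at the new tail. The second resize() claims whatever slack
+		// the allocator gave us beyond the 4 kB we asked for.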
dest.resize(bytes_used + 4096);
dest.resize(dest.capacity());
- pub.next_output_byte = dest.data() + bytes_used;
+ pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
pub.free_in_buffer = dest.size() - bytes_used;
}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
-vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height, const string &exif_data)
{
+ steady_clock::time_point start = steady_clock::now();
VectorDestinationManager dest;
jpeg_compress_struct cinfo;
cinfo.CCIR601_sampling = true; // Seems to be mostly ignored by libjpeg, though.
jpeg_start_compress(&cinfo, true);
+ // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
+ // (and nothing else).
+ jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
+
+ if (!exif_data.empty()) {
+ jpeg_write_marker(&cinfo, JPEG_APP0 + 1, (const JOCTET *)exif_data.data(), exif_data.size());
+ }
+
JSAMPROW yptr[8], cbptr[8], crptr[8];
JSAMPARRAY data[3] = { yptr, cbptr, crptr };
for (unsigned y = 0; y < height; y += 8) {
jpeg_finish_compress(&cinfo);
jpeg_destroy_compress(&cinfo);
+ steady_clock::time_point stop = steady_clock::now();
+ metric_jpeg_encode_time_seconds.count_event(duration<double>(stop - start).count());
+
return move(dest.dest);
}
-VideoStream::VideoStream()
+string encode_jpeg_from_pbo(void *contents, unsigned width, unsigned height, const string &exif_data)
+{
+ unsigned chroma_width = width / 2;
+
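+	// The PBO holds three tightly packed 4:2:2 planes: full-resolution Y',
+	// then Cb and Cr at half horizontal resolution each. E.g., at 1280x720,
+	// Y' starts at offset 0, Cb at 921600 and Cr at 1382400.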
+ const uint8_t *y = (const uint8_t *)contents;
+ const uint8_t *cb = (const uint8_t *)contents + width * height;
+ const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+	return encode_jpeg(y, cb, cr, width, height, exif_data);
+}
+
+VideoStream::VideoStream(AVFormatContext *file_avctx)
+ : avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
{
+ call_once(video_metrics_inited, [] {
+ vector<double> quantiles{ 0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99 };
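+		// 1st through 99th percentile, computed over a 60-second sliding window.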
+ metric_jpeg_encode_time_seconds.init(quantiles, 60.0);
+ global_metrics.add("jpeg_encode_time_seconds", &metric_jpeg_encode_time_seconds);
+ metric_fade_fence_wait_time_seconds.init(quantiles, 60.0);
+ global_metrics.add("fade_fence_wait_time_seconds", &metric_fade_fence_wait_time_seconds);
+ metric_interpolation_fence_wait_time_seconds.init(quantiles, 60.0);
+ global_metrics.add("interpolation_fence_wait_time_seconds", &metric_interpolation_fence_wait_time_seconds);
+ metric_fade_latency_seconds.init(quantiles, 60.0);
+ global_metrics.add("fade_latency_seconds", &metric_fade_latency_seconds);
+ metric_interpolation_latency_seconds.init(quantiles, 60.0);
+ global_metrics.add("interpolation_latency_seconds", &metric_interpolation_latency_seconds);
+ });
+
ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
check_error();
- constexpr size_t width = 1280, height = 720; // FIXME: adjustable width, height
+ size_t width = global_flags.width, height = global_flags.height;
int levels = find_num_levels(width, height);
for (size_t i = 0; i < num_interpolate_slots; ++i) {
glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
check_error();
OperatingPoint op;
- if (global_flags.interpolation_quality == 1) {
+ if (global_flags.interpolation_quality == 0 ||
+ global_flags.interpolation_quality == 1) {
op = operating_point1;
} else if (global_flags.interpolation_quality == 2) {
op = operating_point2;
} else if (global_flags.interpolation_quality == 4) {
op = operating_point4;
} else {
+		// Quality 0 is remapped to 1 in flags.cpp, so nothing else should get here.
assert(false);
}
check_error();
// The “last frame” is initially black.
- unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
- unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
- memset(y.get(), 16, 1280 * 720);
- memset(cb_or_cr.get(), 128, 640 * 720);
- last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
+ unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]);
+ unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
+ memset(y.get(), 16, global_flags.width * global_flags.height);
+ memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
+ last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
+
+ if (file_avctx != nullptr) {
+ with_subtitles = Mux::WITHOUT_SUBTITLES;
+ } else {
+ with_subtitles = Mux::WITH_SUBTITLES;
+ }
}
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+ if (last_flow_tex != 0) {
+ compute_flow->release_texture(last_flow_tex);
+ }
+
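+	// Hand back everything we allocated in the constructor. The assert below
+	// checks that every slot was returned to the pool, i.e., that no frames
+	// were still in flight when we were destroyed.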
+ for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
+ glUnmapNamedBuffer(resource->pbo);
+ check_error();
+ glDeleteBuffers(1, &resource->pbo);
+ check_error();
+ glDeleteFramebuffers(2, resource->input_fbos);
+ check_error();
+ glDeleteFramebuffers(1, &resource->fade_fbo);
+ check_error();
+ glDeleteTextures(1, &resource->input_tex);
+ check_error();
+ glDeleteTextures(1, &resource->gray_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_y_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_cbcr_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cb_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cr_tex);
+ check_error();
+ }
+ assert(interpolate_resources.size() == num_interpolate_slots);
+}
void VideoStream::start()
{
-	AVFormatContext *avctx = avformat_alloc_context();
-	avctx->oformat = av_guess_format("nut", nullptr, nullptr);
-	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
-	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
-	avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
-	avctx->pb->ignore_boundary_point = 1;
-	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-	Mux::Codec video_codec = Mux::CODEC_MJPEG;
+	if (avctx == nullptr) {
+		avctx = avformat_alloc_context();
+
+		// We use Matroska, because it's pretty much the only mux where FFmpeg
+		// allows writing chroma location to override JFIF's default center placement.
+		// (Note that at the time of writing, however, FFmpeg does not correctly
+		// _read_ this information!)
+		avctx->oformat = av_guess_format("matroska", nullptr, nullptr);
+
+		uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+		avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+		avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
+		avctx->pb->ignore_boundary_point = 1;
+
+		avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+	}
- string video_extradata;
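+	// The mux now always carries an audio stream (raw PCM); for frames that
+	// have no sound attached, add_audio_or_silence() below writes silence.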
+ AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();
- constexpr int width = 1280, height = 720; // Doesn't matter for MJPEG.
- stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr,
- AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
- COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+ audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+ audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+ audio_codecpar->channels = 2;
+ audio_codecpar->sample_rate = OUTPUT_FREQUENCY;
+ size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
+ mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
+ AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, with_subtitles));
+ avcodec_parameters_free(&audio_codecpar);
encode_thread = thread(&VideoStream::encode_thread_func, this);
}
void VideoStream::stop()
{
+ should_quit = true;
+ queue_changed.notify_all();
+ clear_queue();
encode_thread.join();
}
deque<QueuedFrame> q;
{
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
q = move(frame_queue);
}
for (const QueuedFrame &qf : q) {
if (qf.type == QueuedFrame::INTERPOLATED ||
qf.type == QueuedFrame::FADED_INTERPOLATED) {
- compute_flow->release_texture(qf.flow_tex);
+ if (qf.flow_tex != 0) {
+ compute_flow->release_texture(qf.flow_tex);
+ }
}
if (qf.type == QueuedFrame::INTERPOLATED) {
interpolate->release_texture(qf.output_tex);
void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
int64_t output_pts, function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder,
- FrameOnDisk frame)
+ FrameOnDisk frame, const string &subtitle, bool include_audio)
{
- fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, frame.pts);
-
- // Preload the file from disk, so that the encoder thread does not get stalled.
- // TODO: Consider sending it through the queue instead.
- (void)frame_reader.read_frame(frame);
+ fprintf(stderr, "output_pts=%" PRId64 " original input_pts=%" PRId64 "\n", output_pts, frame.pts);
QueuedFrame qf;
qf.local_pts = local_pts;
qf.type = QueuedFrame::ORIGINAL;
qf.output_pts = output_pts;
- qf.frame1 = frame;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
+ FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
+ qf.encoded_jpeg.reset(new string(move(read_frame.video)));
+ qf.audio = move(read_frame.audio);
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder,
FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
- float fade_alpha)
+ float fade_alpha, const string &subtitle)
{
- fprintf(stderr, "output_pts=%ld faded input_pts=%ld,%ld fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " faded input_pts=%" PRId64 ",%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
// Get the temporary OpenGL resources we need for doing the fade.
// (We share these with interpolated frames, which is slightly
	// wasteful, but there's no need to have two separate pools around.)
BorrowedInterpolatedFrameResources resources;
{
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
if (interpolate_resources.empty()) {
fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
return;
shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
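+	// Make sure both frames' textures are fully uploaded before we sample from them.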
+ wait_for_upload(frame1);
+ wait_for_upload(frame2);
- ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+ ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
QueuedFrame qf;
qf.local_pts = local_pts;
qf.frame1 = frame1_spec;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
qf.secondary_frame = frame2_spec;
// Subsample and split Cb/Cr.
- chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+ chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
// Read it down (asynchronously) to the CPU.
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
check_error();
- glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
check_error();
- glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+ glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
check_error();
- glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+ glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
check_error();
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
// Set a fence we can wait for to make sure the CPU sees the read.
glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
check_error();
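+	// Remember when the fence was created, so the encode thread can measure
+	// how long the GPU takes to signal it (the fade latency metric).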
+ qf.fence_created = steady_clock::now();
qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
check_error();
qf.resources = move(resources);
qf.local_pts = local_pts;
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
QueueSpotHolder &&queue_spot_holder,
FrameOnDisk frame1, FrameOnDisk frame2,
- float alpha, FrameOnDisk secondary_frame, float fade_alpha)
+ float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
+ bool play_audio)
{
if (secondary_frame.pts != -1) {
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f secondary_pts=%ld fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f secondary_pts=%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
} else {
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
}
// Get the temporary OpenGL resources we need for doing the interpolation.
BorrowedInterpolatedFrameResources resources;
{
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
if (interpolate_resources.empty()) {
fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
return;
qf.display_decoded_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
qf.local_pts = local_pts;
+ qf.subtitle = subtitle;
+
+ if (play_audio) {
+ qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
+ }
check_error();
FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
bool did_decode;
shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
- ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
+ wait_for_upload(frame);
+ ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
+ if (frame_no == 1) {
+ qf.exif_data = frame->exif_data; // Use the white point from the last frame.
+ }
}
glGenerateTextureMipmap(resources->input_tex);
glGenerateTextureMipmap(resources->gray_tex);
check_error();
- // Compute the interpolated frame.
- qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
- check_error();
+ GLuint flow_tex;
+ if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+ // Reuse the flow from previous computation. This frequently happens
+ // if we slow down by more than 2x, so that there are multiple interpolated
+ // frames between each original.
+ flow_tex = last_flow_tex;
+ qf.flow_tex = 0;
+ } else {
+ // Cache miss, so release last_flow_tex.
+ qf.flow_tex = last_flow_tex;
+
+ // Compute the flow.
+ flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+ check_error();
+
+ // Store the flow texture for possible reuse next frame.
+ last_flow_tex = flow_tex;
+ last_frame1 = frame1;
+ last_frame2 = frame2;
+ }
if (secondary_frame.pts != -1) {
// Fade. First kick off the interpolation.
- tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+ tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
check_error();
// Now decode the image we are fading against.
bool did_decode;
shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ wait_for_upload(frame2);
// Then fade against it, putting it into the fade Y' and CbCr textures.
- ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+ RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
+ ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, neutral_color, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
// Subsample and split Cb/Cr.
- chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+ chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
interpolate_no_split->release_texture(qf.output_tex);
+
+ // We already applied the white balance, so don't have the client redo it.
+ qf.exif_data.clear();
} else {
- tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+ tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
check_error();
// Subsample and split Cb/Cr.
- chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+ chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
}
// We could have released qf.flow_tex here, but to make sure we don't cause a stall
// when trying to reuse it for the next frame, we can just as well hold on to it
// and release it only when the readback is done.
+ //
+ // TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+ // _last_ frame, not this one.
// Read it down (asynchronously) to the CPU.
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
check_error();
if (secondary_frame.pts != -1) {
- glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
} else {
- glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+ glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
}
check_error();
- glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+ glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
check_error();
- glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+ glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
check_error();
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
// Set a fence we can wait for to make sure the CPU sees the read.
glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
check_error();
+ qf.fence_created = steady_clock::now();
qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
check_error();
qf.resources = move(resources);
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
int64_t output_pts, function<void()> &&display_func,
- QueueSpotHolder &&queue_spot_holder)
+ QueueSpotHolder &&queue_spot_holder, const string &subtitle)
{
QueuedFrame qf;
qf.type = QueuedFrame::REFRESH;
qf.output_pts = output_pts;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
+void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
+ int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
+{
+ QueuedFrame qf;
+ qf.type = QueuedFrame::SILENCE;
+ qf.output_pts = output_pts;
+ qf.queue_spot_holder = move(queue_spot_holder);
+ qf.silence_length_pts = length_pts;
+
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
namespace {
-shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
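+// Make a private copy of a texture for the UI thread to display. The originals
+// belong to InterpolatedFrameResources and go back into the pool (to be
+// overwritten by the next frame) as soon as this frame is done encoding.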
+RefCountedTexture clone_r8_texture(GLuint src_tex, unsigned width, unsigned height)
{
- size_t chroma_width = width / 2;
-
- const uint8_t *y = (const uint8_t *)contents;
- const uint8_t *cb = (const uint8_t *)contents + width * height;
- const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+ GLuint tex;
+ glCreateTextures(GL_TEXTURE_2D, 1, &tex);
+ check_error();
+ glTextureStorage2D(tex, 1, GL_R8, width, height);
+ check_error();
+ glCopyImageSubData(src_tex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ tex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ width, height, 1);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ check_error();
- shared_ptr<Frame> frame(new Frame);
- frame->y.reset(new uint8_t[width * height]);
- frame->cb.reset(new uint8_t[chroma_width * height]);
- frame->cr.reset(new uint8_t[chroma_width * height]);
- for (unsigned yy = 0; yy < height; ++yy) {
- memcpy(frame->y.get() + width * yy, y + width * yy, width);
- memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
- memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
- }
- frame->is_semiplanar = false;
- frame->width = width;
- frame->height = height;
- frame->chroma_subsampling_x = 2;
- frame->chroma_subsampling_y = 1;
- frame->pitch_y = width;
- frame->pitch_chroma = chroma_width;
- return frame;
+ return RefCountedTexture(new GLuint(tex), TextureDeleter());
}
} // namespace
bool ok = make_current(context, surface);
if (!ok) {
fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
- exit(1);
+ abort();
}
- for ( ;; ) {
+ init_pbo_pool();
+
+ while (!should_quit) {
QueuedFrame qf;
{
unique_lock<mutex> lock(queue_lock);
// Wait until we have a frame to play.
- queue_changed.wait(lock, [this]{
- return !frame_queue.empty();
+ queue_changed.wait(lock, [this] {
+ return !frame_queue.empty() || should_quit;
});
+ if (should_quit) {
+ break;
+ }
steady_clock::time_point frame_start = frame_queue.front().local_pts;
// Now sleep until the frame is supposed to start (the usual case),
// _or_ clear_queue() happened.
- bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
- return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
- });
+ bool aborted;
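+			// When rendering to a file (output_fast_forward), we never wait in
+			// real time; just check whether clear_queue() replaced the front
+			// and move on.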
+ if (output_fast_forward) {
+ aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+ } else {
+ aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
+ return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+ });
+ }
if (aborted) {
// clear_queue() happened, so don't play this frame after all.
continue;
frame_queue.pop_front();
}
+ // Hack: We mux the subtitle packet one time unit before the actual frame,
+ // so that Nageru is sure to get it first.
+ if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = mux->get_subtitle_stream_idx();
+ assert(pkt.stream_index != -1);
+ pkt.data = (uint8_t *)qf.subtitle.data();
+ pkt.size = qf.subtitle.size();
+ pkt.flags = 0;
+ pkt.duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
+ mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+ }
+
if (qf.type == QueuedFrame::ORIGINAL) {
// Send the JPEG frame on, unchanged.
- string jpeg = frame_reader.read_frame(qf.frame1);
+ string jpeg = move(*qf.encoded_jpeg);
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
-		stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
-		last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+		pkt.flags = AV_PKT_FLAG_KEY;
+		mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+		last_frame = move(jpeg);
+
+		add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::FADED) {
+ steady_clock::time_point start = steady_clock::now();
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
-
- shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+ steady_clock::time_point stop = steady_clock::now();
+ metric_fade_fence_wait_time_seconds.count_event(duration<double>(stop - start).count());
+ metric_fade_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());
// Now JPEG encode it, and send it on to the stream.
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+ string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, /*exif_data=*/"");
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
- stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
+
+ add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
+ steady_clock::time_point start = steady_clock::now();
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+ steady_clock::time_point stop = steady_clock::now();
+ metric_interpolation_fence_wait_time_seconds.count_event(duration<double>(stop - start).count());
+ metric_interpolation_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());
// Send it on to display.
- shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
if (qf.display_decoded_func != nullptr) {
- qf.display_decoded_func(frame);
+ shared_ptr<Frame> frame(new Frame);
+ if (qf.type == QueuedFrame::FADED_INTERPOLATED) {
+ frame->y = clone_r8_texture(qf.resources->fade_y_output_tex, global_flags.width, global_flags.height);
+ } else {
+ frame->y = clone_r8_texture(qf.output_tex, global_flags.width, global_flags.height);
+ }
+ frame->cb = clone_r8_texture(qf.resources->cb_tex, global_flags.width / 2, global_flags.height);
+ frame->cr = clone_r8_texture(qf.resources->cr_tex, global_flags.width / 2, global_flags.height);
+ frame->width = global_flags.width;
+ frame->height = global_flags.height;
+ frame->chroma_subsampling_x = 2;
+ frame->chroma_subsampling_y = 1;
+ frame->uploaded_ui_thread = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ qf.display_decoded_func(move(frame));
}
// Now JPEG encode it, and send it on to the stream.
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
- compute_flow->release_texture(qf.flow_tex);
+		string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, qf.exif_data);
+ if (qf.flow_tex != 0) {
+ compute_flow->release_texture(qf.flow_tex);
+ }
if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
interpolate->release_texture(qf.output_tex);
interpolate->release_texture(qf.cbcr_tex);
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
- stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
+
+ add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::REFRESH) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = 0;
pkt.data = (uint8_t *)last_frame.data();
pkt.size = last_frame.size();
- stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+ add_audio_or_silence(qf); // Definitely silence.
+ } else if (qf.type == QueuedFrame::SILENCE) {
+ add_silence(qf.output_pts, qf.silence_length_pts);
} else {
assert(false);
}
type = AVIO_DATA_MARKER_SYNC_POINT;
}
+ HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
- global_httpd->set_header(stream_mux_header);
+ global_httpd->set_header(stream_id, stream_mux_header);
} else {
- global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+ global_httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
}
return buf_size;
}
+
+void VideoStream::add_silence(int64_t pts, int64_t length_pts)
+{
+ // At 59.94, this will never quite add up (even discounting refresh frames,
+ // which have unpredictable length), but hopefully, the player in the other
+ // end should be able to stretch silence easily enough.
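+	// Stereo, hence the factor of two. As an example (assuming OUTPUT_FREQUENCY
+	// is 48000): one 1/60 s frame works out to 800 samples per channel,
+	// i.e. 1600 int32 values of silence.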
+ long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
+ uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
+
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 1;
+ pkt.data = zero;
+ pkt.size = num_samples * sizeof(int32_t);
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, pts, pts);
+
+ free(zero);
+}
+
+void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
+{
+ if (qf.audio.empty()) {
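+		// No audio came with this frame, so fill in exactly one video frame's
+		// worth of silence.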
+ int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
+ add_silence(qf.output_pts, frame_length);
+ } else {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 1;
+ pkt.data = (uint8_t *)qf.audio.data();
+ pkt.size = qf.audio.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ }
+}