extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
+#include <libavutil/channel_layout.h>
}
#include "chroma_subsampler.h"
#include "flow.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
+#include "pbo_pool.h"
#include "player.h"
#include "shared/context.h"
+#include "shared/ffmpeg_raii.h"
#include "shared/httpd.h"
#include "shared/metrics.h"
#include "shared/shared_defs.h"
Summary metric_fade_fence_wait_time_seconds;
Summary metric_interpolation_fence_wait_time_seconds;
+// If this frame's textures were uploaded asynchronously (the
+// uploaded_interpolation sync object is set), insert a server-side wait so
+// that later GL commands on this context see the completed upload.
+// glWaitSync blocks the GPU command stream, not the CPU. The sync object is
+// released afterwards so the wait happens at most once per frame.
+void wait_for_upload(shared_ptr<Frame> &frame)
+{
+ if (frame->uploaded_interpolation != nullptr) {
+ glWaitSync(frame->uploaded_interpolation.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+ frame->uploaded_interpolation.reset();
+ }
+}
+
} // namespace
extern HTTPD *global_httpd;
return move(dest.dest);
}
+// JPEG-encode a planar image that has been read back into a PBO.
+// The PBO layout is three tightly-packed planes: a full-resolution Y plane,
+// then a half-width Cb plane, then a half-width Cr plane (chroma is
+// subsampled 2x horizontally only, as chroma_width = width / 2 with full
+// height).
+//
+// NOTE: exif_data is taken by value without const; the previous
+// `const string` by-value parameter made the move() below degrade into a
+// copy (you cannot move from a const object). Callers passing an rvalue
+// (e.g. move(qf.exif_data) or a literal) now avoid that copy.
+string encode_jpeg_from_pbo(void *contents, unsigned width, unsigned height, string exif_data)
+{
+ unsigned chroma_width = width / 2;
+
+ const uint8_t *y = (const uint8_t *)contents;
+ const uint8_t *cb = (const uint8_t *)contents + width * height;
+ const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+ return encode_jpeg(y, cb, cr, width, height, move(exif_data));
+}
+
VideoStream::VideoStream(AVFormatContext *file_avctx)
: avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
{
audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
- audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
- audio_codecpar->channels = 2;
+ audio_codecpar->ch_layout.order = AV_CHANNEL_ORDER_NATIVE;
+ audio_codecpar->ch_layout.nb_channels = 2;
+ audio_codecpar->ch_layout.u.mask = AV_CH_LAYOUT_STEREO;
audio_codecpar->sample_rate = OUTPUT_FREQUENCY;
size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ wait_for_upload(frame1);
+ wait_for_upload(frame2);
ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
bool did_decode;
shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ wait_for_upload(frame);
ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
if (frame_no == 1) {
qf.exif_data = frame->exif_data; // Use the white point from the last frame.
// Now decode the image we are fading against.
bool did_decode;
shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+ wait_for_upload(frame2);
// Then fade against it, putting it into the fade Y' and CbCr textures.
RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
namespace {
-shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
+// Make an independent GPU-side copy of a single-channel (GL_R8) texture,
+// returned as a ref-counted handle (TextureDeleter frees it when the last
+// reference goes away). Replaces the old CPU readback path
+// (frame_from_pbo): the copy stays entirely on the GPU.
+RefCountedTexture clone_r8_texture(GLuint src_tex, unsigned width, unsigned height)
{
- size_t chroma_width = width / 2;
-
- const uint8_t *y = (const uint8_t *)contents;
- const uint8_t *cb = (const uint8_t *)contents + width * height;
- const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+ GLuint tex;
+ // Immutable storage, one mip level, same dimensions as the source.
+ glCreateTextures(GL_TEXTURE_2D, 1, &tex);
+ check_error();
+ glTextureStorage2D(tex, 1, GL_R8, width, height);
+ check_error();
+ // Raw texel copy (no format conversion, no CPU involvement).
+ glCopyImageSubData(src_tex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ tex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ width, height, 1);
+ check_error();
+ // Sampling state for display use: bilinear filtering, clamp at edges.
+ glTextureParameteri(tex, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ check_error();
+ glTextureParameteri(tex, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ check_error();
- shared_ptr<Frame> frame(new Frame);
- frame->y.reset(new uint8_t[width * height]);
- frame->cb.reset(new uint8_t[chroma_width * height]);
- frame->cr.reset(new uint8_t[chroma_width * height]);
- for (unsigned yy = 0; yy < height; ++yy) {
- memcpy(frame->y.get() + width * yy, y + width * yy, width);
- memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
- memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
- }
- frame->is_semiplanar = false;
- frame->width = width;
- frame->height = height;
- frame->chroma_subsampling_x = 2;
- frame->chroma_subsampling_y = 1;
- frame->pitch_y = width;
- frame->pitch_chroma = chroma_width;
- return frame;
+ return RefCountedTexture(new GLuint(tex), TextureDeleter());
}
} // namespace
abort();
}
+ init_pbo_pool();
+
while (!should_quit) {
QueuedFrame qf;
{
// Hack: We mux the subtitle packet one time unit before the actual frame,
// so that Nageru is sure to get it first.
if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = mux->get_subtitle_stream_idx();
- assert(pkt.stream_index != -1);
- pkt.data = (uint8_t *)qf.subtitle.data();
- pkt.size = qf.subtitle.size();
- pkt.flags = 0;
- pkt.duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
- mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = mux->get_subtitle_stream_idx();
+ assert(pkt->stream_index != -1);
+ pkt->data = (uint8_t *)qf.subtitle.data();
+ pkt->size = qf.subtitle.size();
+ pkt->flags = 0;
+ pkt->duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
+ mux->add_packet(*pkt, qf.output_pts - 1, qf.output_pts - 1);
}
if (qf.type == QueuedFrame::ORIGINAL) {
// Send the JPEG frame on, unchanged.
string jpeg = move(*qf.encoded_jpeg);
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
metric_fade_fence_wait_time_seconds.count_event(duration<double>(stop - start).count());
metric_fade_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());
- shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
- assert(frame->exif_data.empty());
-
// Now JPEG encode it, and send it on to the stream.
- string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
-
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, /*exif_data=*/"");
+
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
metric_interpolation_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());
// Send it on to display.
- shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
if (qf.display_decoded_func != nullptr) {
- qf.display_decoded_func(frame);
+ shared_ptr<Frame> frame(new Frame);
+ if (qf.type == QueuedFrame::FADED_INTERPOLATED) {
+ frame->y = clone_r8_texture(qf.resources->fade_y_output_tex, global_flags.width, global_flags.height);
+ } else {
+ frame->y = clone_r8_texture(qf.output_tex, global_flags.width, global_flags.height);
+ }
+ frame->cb = clone_r8_texture(qf.resources->cb_tex, global_flags.width / 2, global_flags.height);
+ frame->cr = clone_r8_texture(qf.resources->cr_tex, global_flags.width / 2, global_flags.height);
+ frame->width = global_flags.width;
+ frame->height = global_flags.height;
+ frame->chroma_subsampling_x = 2;
+ frame->chroma_subsampling_y = 1;
+ frame->uploaded_ui_thread = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ qf.display_decoded_func(move(frame));
}
// Now JPEG encode it, and send it on to the stream.
- string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, move(qf.exif_data));
+ string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, move(qf.exif_data));
if (qf.flow_tex != 0) {
compute_flow->release_texture(qf.flow_tex);
}
interpolate->release_texture(qf.cbcr_tex);
}
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)jpeg.data();
+ pkt->size = jpeg.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::REFRESH) {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)last_frame.data();
- pkt.size = last_frame.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 0;
+ pkt->data = (uint8_t *)last_frame.data();
+ pkt->size = last_frame.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
add_audio_or_silence(qf); // Definitely silence.
} else if (qf.type == QueuedFrame::SILENCE) {
long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 1;
- pkt.data = zero;
- pkt.size = num_samples * sizeof(int32_t);
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, pts, pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 1;
+ pkt->data = zero;
+ pkt->size = num_samples * sizeof(int32_t);
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, pts, pts);
free(zero);
}
int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
add_silence(qf.output_pts, frame_length);
} else {
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 1;
- pkt.data = (uint8_t *)qf.audio.data();
- pkt.size = qf.audio.size();
- pkt.flags = AV_PKT_FLAG_KEY;
- mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ AVPacketWithDeleter pkt = av_packet_alloc_unique();
+ pkt->stream_index = 1;
+ pkt->data = (uint8_t *)qf.audio.data();
+ pkt->size = qf.audio.size();
+ pkt->flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(*pkt, qf.output_pts, qf.output_pts);
}
}