}
#include "chroma_subsampler.h"
-#include "shared/context.h"
#include "flags.h"
#include "flow.h"
-#include "shared/httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
-#include "shared/mux.h"
#include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"
// NOTE(review): this chunk is unified-diff residue — lines starting with '-'/'+'
// are patch delete/add markers and hunk context is elided, so the text below is
// not compilable C++. Comments describe what the visible patch does.
//
// libjpeg destination manager that writes compressed output into an in-memory
// buffer (interior of the ctor/empty_output_buffer logic is elided here).
struct VectorDestinationManager {
jpeg_destination_mgr pub;
// Patch changes the buffer from vector<uint8_t> to string, so encode_jpeg()
// can return/move the bytes as a string without an extra copy.
- std::vector<uint8_t> dest;
+ string dest;
VectorDestinationManager()
{
{
dest.resize(bytes_used + 4096);
dest.resize(dest.capacity());
// Cast is now needed: string::data() yields char *, libjpeg wants JOCTET/uint8_t *.
- pub.next_output_byte = dest.data() + bytes_used;
+ pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
pub.free_in_buffer = dest.size() - bytes_used;
}
};
// Standard layout is asserted — presumably because the jpeg_destination_mgr
// member is cast back to the enclosing struct by libjpeg callbacks; verify
// against the elided callback code.
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
// Return type changes vector<uint8_t> -> string, matching the new string-based
// VectorDestinationManager buffer above (callers updated in later hunks).
-vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
VectorDestinationManager dest;
// NOTE(review): hunk boundary here — the lines below belong to a different
// function (teardown code, presumably ~VideoStream or stop(); the enclosing
// signature is not visible in this chunk — TODO confirm).
if (last_flow_tex != 0) {
compute_flow->release_texture(last_flow_tex);
}
+
// New cleanup: explicitly unmap and delete all per-slot GL resources
// (PBO, FBOs, textures), checking for GL errors after each call.
+ for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
+ glUnmapNamedBuffer(resource->pbo);
+ check_error();
+ glDeleteBuffers(1, &resource->pbo);
+ check_error();
+ glDeleteFramebuffers(2, resource->input_fbos);
+ check_error();
+ glDeleteFramebuffers(1, &resource->fade_fbo);
+ check_error();
+ glDeleteTextures(1, &resource->input_tex);
+ check_error();
+ glDeleteTextures(1, &resource->gray_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_y_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_cbcr_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cb_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cr_tex);
+ check_error();
+ }
// Sanity check: every borrowed resource slot must have been returned
// before teardown.
+ assert(interpolate_resources.size() == num_interpolate_slots);
}
void VideoStream::start()
size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
// Mux constructor signature changed: the WITHOUT_AUDIO flag is dropped and a
// trailing Mux::WITH_SUBTITLES argument is added, so the stream gains a
// subtitle track (used by the subtitle packets muxed in encode_thread_func).
mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
- AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
- COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+ AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, Mux::WITH_SUBTITLES));
encode_thread = thread(&VideoStream::encode_thread_func, this);
}
// NOTE(review): hunk boundary — the lines below drain the frame queue in some
// other function (enclosing signature not visible in this chunk).
deque<QueuedFrame> q;
{
// unique_lock -> lock_guard: the lock is held for the full scope and never
// unlocked early, so the cheaper guard suffices.
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
q = move(frame_queue);
}
// Queues an unmodified (original) frame for output.
void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
int64_t output_pts, function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder,
// Patch adds a subtitle parameter so per-frame subtitle metadata can travel
// with the queued frame (same change applied to the other schedule_* functions).
- FrameOnDisk frame)
+ FrameOnDisk frame, const string &subtitle)
{
// %ld -> PRId64: portable formatting of int64_t (%ld is wrong on 32-bit and
// Windows LLP64 targets).
- fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, frame.pts);
-
- // Preload the file from disk, so that the encoder thread does not get stalled.
- // TODO: Consider sending it through the queue instead.
- (void)frame_reader.read_frame(frame);
+ fprintf(stderr, "output_pts=%" PRId64 " original input_pts=%" PRId64 "\n", output_pts, frame.pts);
QueuedFrame qf;
qf.local_pts = local_pts;
qf.type = QueuedFrame::ORIGINAL;
qf.output_pts = output_pts;
- qf.frame1 = frame;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
// The JPEG bytes are now read from disk here and carried through the queue
// (encoded_jpeg), replacing the old preload-then-reread-in-encoder scheme
// (the TODO above is thereby resolved).
+ qf.encoded_jpeg.reset(new string(frame_reader.read_frame(frame)));
// unique_lock -> lock_guard: no early unlock needed.
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
// NOTE(review): top of this signature is cut off by the chunk boundary —
// presumably VideoStream::schedule_faded_frame; TODO confirm.
function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder,
FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
// Subtitle parameter added, mirroring schedule_original_frame.
- float fade_alpha)
+ float fade_alpha, const string &subtitle)
{
// %ld -> PRId64 for portable int64_t formatting.
- fprintf(stderr, "output_pts=%ld faded input_pts=%ld,%ld fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " faded input_pts=%" PRId64 ",%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
// Get the temporary OpenGL resources we need for doing the fade.
// (We share these with interpolated frames, which is slightly
// separate pools around.)
BorrowedInterpolatedFrameResources resources;
{
// unique_lock -> lock_guard in this short critical section.
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
if (interpolate_resources.empty()) {
fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
return;
// NOTE(review): elided hunk context here — qf construction lines are missing.
qf.frame1 = frame1_spec;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
qf.secondary_frame = frame2_spec;
qf.resources = move(resources);
qf.local_pts = local_pts;
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
// NOTE(review): top of this signature is cut off by the chunk boundary —
// presumably VideoStream::schedule_interpolated_frame; TODO confirm.
int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
QueueSpotHolder &&queue_spot_holder,
FrameOnDisk frame1, FrameOnDisk frame2,
// Subtitle parameter added, mirroring the other schedule_* functions.
- float alpha, FrameOnDisk secondary_frame, float fade_alpha)
+ float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle)
{
if (secondary_frame.pts != -1) {
// %ld -> PRId64 for portable int64_t formatting (both branches).
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f secondary_pts=%ld fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f secondary_pts=%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
} else {
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
}
// Get the temporary OpenGL resources we need for doing the interpolation.
BorrowedInterpolatedFrameResources resources;
{
// unique_lock -> lock_guard in this short critical section.
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
if (interpolate_resources.empty()) {
fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
return;
// NOTE(review): elided hunk context here — qf setup and GL work are missing.
qf.display_decoded_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
qf.local_pts = local_pts;
+ qf.subtitle = subtitle;
check_error();
check_error();
qf.resources = move(resources);
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
// Queues a refresh frame (re-displays the previous output frame).
void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
int64_t output_pts, function<void()> &&display_func,
// Subtitle parameter added, mirroring the other schedule_* functions.
- QueueSpotHolder &&queue_spot_holder)
+ QueueSpotHolder &&queue_spot_holder, const string &subtitle)
{
QueuedFrame qf;
qf.type = QueuedFrame::REFRESH;
qf.output_pts = output_pts;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
+ qf.subtitle = subtitle;
// unique_lock -> lock_guard: no early unlock needed.
- unique_lock<mutex> lock(queue_lock);
+ lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
queue_changed.notify_all();
}
// NOTE(review): fragment of the encoder thread loop (presumably
// VideoStream::encode_thread_func — the function header is outside this chunk).
unique_lock<mutex> lock(queue_lock);
// Wait until we have a frame to play.
// Whitespace-only change: space before '{' in the lambda (both wait calls).
- queue_changed.wait(lock, [this]{
+ queue_changed.wait(lock, [this] {
return !frame_queue.empty() || should_quit;
});
if (should_quit) {
// NOTE(review): elided hunk context between these branches.
if (output_fast_forward) {
aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
} else {
- aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
+ aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
});
}
frame_queue.pop_front();
}
// New: if the queued frame carries subtitle data, emit it on the dedicated
// subtitle stream (added via Mux::WITH_SUBTITLES in start()) one pts unit
// before the video frame, so the receiver sees it first.
+ // Hack: We mux the subtitle packet one time unit before the actual frame,
+ // so that Nageru is sure to get it first.
+ if (!qf.subtitle.empty()) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = mux->get_subtitle_stream_idx();
+ assert(pkt.stream_index != -1);
+ pkt.data = (uint8_t *)qf.subtitle.data();
+ pkt.size = qf.subtitle.size();
+ pkt.flags = 0;
+ pkt.duration = lrint(TIMEBASE / global_flags.output_framerate); // Doesn't really matter for Nageru.
+ mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+ }
+
if (qf.type == QueuedFrame::ORIGINAL) {
// Send the JPEG frame on, unchanged.
// The JPEG was already read by schedule_original_frame and shipped in the
// queue (encoded_jpeg); no disk re-read on the encoder thread anymore.
- string jpeg = frame_reader.read_frame(qf.frame1);
+ string jpeg = move(*qf.encoded_jpeg);
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = 0;
pkt.size = jpeg.size();
pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
// last_frame is now a string too, so it can take the bytes by move
// instead of byte-wise assign.
-
- last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+ last_frame = move(jpeg);
} else if (qf.type == QueuedFrame::FADED) {
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
// Now JPEG encode it, and send it on to the stream.
// encode_jpeg now returns string (see its updated signature above).
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+ string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
AVPacket pkt;
av_init_packet(&pkt);
// NOTE(review): elided hunk context — a later branch (interpolated frames)
// gets the same vector<uint8_t> -> string update.
}
// Now JPEG encode it, and send it on to the stream.
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+ string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
if (qf.flow_tex != 0) {
compute_flow->release_texture(qf.flow_tex);
}