}
#include "chroma_subsampler.h"
+#include "exif_parser.h"
#include "flags.h"
#include "flow.h"
#include "jpeg_frame_view.h"
#include "player.h"
#include "shared/context.h"
#include "shared/httpd.h"
+#include "shared/shared_defs.h"
#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"
+#include <cinttypes>
#include <jpeglib.h>
#include <unistd.h>
+using namespace movit;
using namespace std;
using namespace std::chrono;
struct VectorDestinationManager {
jpeg_destination_mgr pub;
- std::vector<uint8_t> dest;
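+ // Store the encoded JPEG as a string rather than a vector, so it can be handed off (e.g., moved into last_frame) without a copy.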
+ string dest;
VectorDestinationManager()
{
{
dest.resize(bytes_used + 4096);
dest.resize(dest.capacity());
- pub.next_output_byte = dest.data() + bytes_used;
+ pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
pub.free_in_buffer = dest.size() - bytes_used;
}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
-vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height, const string &exif_data)
{
VectorDestinationManager dest;
// (and nothing else).
jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
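+ // If there is EXIF data (e.g., the white point), store it in an APP1 marker, which is where EXIF data normally lives in a JPEG file.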
+ if (!exif_data.empty()) {
+ jpeg_write_marker(&cinfo, JPEG_APP0 + 1, (const JOCTET *)exif_data.data(), exif_data.size());
+ }
+
JSAMPROW yptr[8], cbptr[8], crptr[8];
JSAMPARRAY data[3] = { yptr, cbptr, crptr };
for (unsigned y = 0; y < height; y += 8) {
unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
memset(y.get(), 16, global_flags.width * global_flags.height);
memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
- last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
+ last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
+
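+ // Subtitles are used only to signal Nageru over the network stream (see encode_thread_func() below), so leave them out when exporting to a file.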
+ if (file_avctx != nullptr) {
+ with_subtitles = Mux::WITHOUT_SUBTITLES;
+ } else {
+ with_subtitles = Mux::WITH_SUBTITLES;
+ }
}
VideoStream::~VideoStream()
if (last_flow_tex != 0) {
compute_flow->release_texture(last_flow_tex);
}
+
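+ // Release the per-slot interpolation resources. The PBO is still mapped (pbo_contents), so unmap it before deleting the buffer.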
+ for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
+ glUnmapNamedBuffer(resource->pbo);
+ check_error();
+ glDeleteBuffers(1, &resource->pbo);
+ check_error();
+ glDeleteFramebuffers(2, resource->input_fbos);
+ check_error();
+ glDeleteFramebuffers(1, &resource->fade_fbo);
+ check_error();
+ glDeleteTextures(1, &resource->input_tex);
+ check_error();
+ glDeleteTextures(1, &resource->gray_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_y_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->fade_cbcr_output_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cb_tex);
+ check_error();
+ glDeleteTextures(1, &resource->cr_tex);
+ check_error();
+ }
+ assert(interpolate_resources.size() == num_interpolate_slots);
}
void VideoStream::start()
avctx->flags = AVFMT_FLAG_CUSTOM_IO;
}
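+ // Set up codec parameters for the audio stream: raw 32-bit signed PCM, stereo, at OUTPUT_FREQUENCY.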
+ AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();
+
+ audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+ audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+ audio_codecpar->channels = 2;
+ audio_codecpar->sample_rate = OUTPUT_FREQUENCY;
+
size_t width = global_flags.width, height = global_flags.height; // Doesn't matter for MJPEG.
- mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
- AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, Mux::WITH_SUBTITLES));
+ mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
+ AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, with_subtitles));
+ avcodec_parameters_free(&audio_codecpar);
encode_thread = thread(&VideoStream::encode_thread_func, this);
}
void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
int64_t output_pts, function<void()> &&display_func,
QueueSpotHolder &&queue_spot_holder,
- FrameOnDisk frame, const string &subtitle)
+ FrameOnDisk frame, const string &subtitle, bool include_audio)
{
- fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, frame.pts);
-
- // Preload the file from disk, so that the encoder thread does not get stalled.
- // TODO: Consider sending it through the queue instead.
- (void)frame_reader.read_frame(frame);
+ fprintf(stderr, "output_pts=%" PRId64 " original input_pts=%" PRId64 "\n", output_pts, frame.pts);
QueuedFrame qf;
qf.local_pts = local_pts;
qf.type = QueuedFrame::ORIGINAL;
qf.output_pts = output_pts;
- qf.frame1 = frame;
qf.display_func = move(display_func);
qf.queue_spot_holder = move(queue_spot_holder);
qf.subtitle = subtitle;
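+ // Read the frame (and possibly its audio) from disk right away, so that the encoder thread does not get stalled on I/O; the data is sent along through the queue.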
+ FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
+ qf.encoded_jpeg.reset(new string(move(read_frame.video)));
+ qf.audio = move(read_frame.audio);
lock_guard<mutex> lock(queue_lock);
frame_queue.push_back(move(qf));
FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
float fade_alpha, const string &subtitle)
{
- fprintf(stderr, "output_pts=%ld faded input_pts=%ld,%ld fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " faded input_pts=%" PRId64 ",%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
// Get the temporary OpenGL resources we need for doing the fade.
// (We share these with interpolated frames, which is slightly
int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
QueueSpotHolder &&queue_spot_holder,
FrameOnDisk frame1, FrameOnDisk frame2,
- float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle)
+ float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
+ bool play_audio)
{
if (secondary_frame.pts != -1) {
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f secondary_pts=%ld fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f secondary_pts=%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
} else {
- fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+ fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
}
// Get the temporary OpenGL resources we need for doing the interpolation.
qf.local_pts = local_pts;
qf.subtitle = subtitle;
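+ // If audio is wanted, it comes from the first of the two input frames.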
+ if (play_audio) {
+ qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
+ }
+
check_error();
// Convert frame0 and frame1 to OpenGL textures.
bool did_decode;
shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
+ if (frame_no == 1) {
+ qf.exif_data = frame->exif_data; // Use the white point from the last frame.
+ }
}
glGenerateTextureMipmap(resources->input_tex);
shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
// Then fade against it, putting it into the fade Y' and CbCr textures.
- ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
+ RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
+ ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, neutral_color, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
// Subsample and split Cb/Cr.
chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
interpolate_no_split->release_texture(qf.output_tex);
+
+ // We already applied the white balance, so don't have the client redo it.
+ qf.exif_data.clear();
} else {
tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
check_error();
queue_changed.notify_all();
}
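+// Queue a stretch of silence, with no accompanying video. The actual PCM data
+// is synthesized by add_silence() in the encoder thread.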
+void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
+ int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
+{
+ QueuedFrame qf;
+ qf.type = QueuedFrame::SILENCE;
+ qf.output_pts = output_pts;
+ qf.queue_spot_holder = move(queue_spot_holder);
+ qf.silence_length_pts = length_pts;
+
+ lock_guard<mutex> lock(queue_lock);
+ frame_queue.push_back(move(qf));
+ queue_changed.notify_all();
+}
+
namespace {
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
bool ok = make_current(context, surface);
if (!ok) {
fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
- exit(1);
+ abort();
}
while (!should_quit) {
// Hack: We mux the subtitle packet one time unit before the actual frame,
// so that Nageru is sure to get it first.
- if (!qf.subtitle.empty()) {
+ if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = mux->get_subtitle_stream_idx();
if (qf.type == QueuedFrame::ORIGINAL) {
// Send the JPEG frame on, unchanged.
- string jpeg = frame_reader.read_frame(qf.frame1);
+ string jpeg = move(*qf.encoded_jpeg);
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = 0;
pkt.size = jpeg.size();
pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
- last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+ last_frame = move(jpeg);
+ add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::FADED) {
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
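+ // Faded frames have their white balance baked in already, so there should be no EXIF data left to pass on.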
+ assert(frame->exif_data.empty());
// Now JPEG encode it, and send it on to the stream.
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+ string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
+
+ add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
}
// Now JPEG encode it, and send it on to the stream.
- vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+ string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, qf.exif_data);
if (qf.flow_tex != 0) {
compute_flow->release_texture(qf.flow_tex);
}
pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
+
+ add_audio_or_silence(qf);
} else if (qf.type == QueuedFrame::REFRESH) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.size = last_frame.size();
pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+ add_audio_or_silence(qf); // Definitely silence.
+ } else if (qf.type == QueuedFrame::SILENCE) {
+ add_silence(qf.output_pts, qf.silence_length_pts);
} else {
assert(false);
}
type = AVIO_DATA_MARKER_SYNC_POINT;
}
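+ // Everything from this mux goes to the main HTTP stream.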
+ HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
- global_httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
+ global_httpd->set_header(stream_id, stream_mux_header);
} else {
- global_httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+ global_httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
}
return buf_size;
}
+
+void VideoStream::add_silence(int64_t pts, int64_t length_pts)
+{
+ // At 59.94, this will never quite add up (even discounting refresh frames,
+ // which have unpredictable length), but hopefully the player at the other
+ // end should be able to stretch silence easily enough.
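+ // Note the factor of two: num_samples counts individual samples, not stereo frames.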
+ long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
+ uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
+
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 1;
+ pkt.data = zero;
+ pkt.size = num_samples * sizeof(int32_t);
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, pts, pts);
+
+ free(zero);
+}
+
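+// Mux the queued frame's audio if it brought any; otherwise, emit one video frame's worth of silence to keep the audio stream continuous.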
+void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
+{
+ if (qf.audio.empty()) {
+ int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
+ add_silence(qf.output_pts, frame_length);
+ } else {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index = 1;
+ pkt.data = (uint8_t *)qf.audio.data();
+ pkt.size = qf.audio.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
+ mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+ }
+}