#include <stdio.h>
-extern "C" {
-#include <libavformat/avformat.h>
-#include <libavformat/avio.h>
-}
+#include <movit/util.h>
#include "clip_list.h"
+#include "context.h"
#include "defs.h"
#include "ffmpeg_raii.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "mux.h"
#include "player.h"
+#include "timebase.h"
+#include "video_stream.h"
using namespace std;
using namespace std::chrono;
extern vector<int64_t> frames[MAX_STREAMS];
extern HTTPD *global_httpd;
-namespace {
-
-string read_file(const string &filename)
+void Player::thread_func(bool also_output_to_stream)
{
- FILE *fp = fopen(filename.c_str(), "rb");
- if (fp == nullptr) {
- perror(filename.c_str());
- return "";
+ pthread_setname_np(pthread_self(), "Player");
+
+ QSurface *surface = create_surface();
+ QOpenGLContext *context = create_context(surface);
+ if (!make_current(context, surface)) {
+ printf("oops\n");
+ exit(1);
}
- fseek(fp, 0, SEEK_END);
- long len = ftell(fp);
- rewind(fp);
+ check_error();
- string ret;
- ret.resize(len);
- fread(&ret[0], len, 1, fp);
- fclose(fp);
- return ret;
-}
+ // Create the VideoStream object, now that we have an OpenGL context.
+ if (also_output_to_stream) {
+ video_stream.reset(new VideoStream);
+ video_stream->start();
+ }
+
+ check_error();
-} // namespace
+ constexpr double output_framerate = 60000.0 / 1001.0; // FIXME: make configurable
+ int64_t pts = 0;
-void Player::thread_func()
-{
for ( ;; ) {
// Wait until we're supposed to play something.
{
stream_idx = current_stream_idx;
}
steady_clock::time_point origin = steady_clock::now();
- int64_t pts_origin = clip.pts_in;
+ int64_t in_pts_origin = clip.pts_in;
+ int64_t out_pts_origin = pts;
- int64_t next_pts = pts_origin - 1; // Make sure we play the frame at clip.pts_in if it exists.
+ // Start playing exactly at a frame.
+ {
+ lock_guard<mutex> lock(frame_mu);
+
+		// Find the first frame such that frame.pts >= in_pts.
+ auto it = lower_bound(frames[stream_idx].begin(),
+ frames[stream_idx].end(),
+ in_pts_origin);
+ if (it != frames[stream_idx].end()) {
+ in_pts_origin = *it;
+ }
+ }
+
+ // TODO: Lock to a rational multiple of the frame rate if possible.
+ double speed = 0.5;
bool aborted = false;
- for ( ;; ) {
- // Find the next frame.
+ for (int frameno = 0; ; ++frameno) { // Ends when the clip ends.
+ double out_pts = out_pts_origin + TIMEBASE * frameno / output_framerate;
+ steady_clock::time_point next_frame_start =
+ origin + microseconds(lrint((out_pts - out_pts_origin) * 1e6 / TIMEBASE));
+ int64_t in_pts = lrint(in_pts_origin + TIMEBASE * frameno * speed / output_framerate);
+ pts = lrint(out_pts);
+
+ steady_clock::duration time_behind = steady_clock::now() - next_frame_start;
+ if (time_behind >= milliseconds(200)) {
+ fprintf(stderr, "WARNING: %ld ms behind, dropping a frame (no matter the type).\n",
+ lrint(1e3 * duration<double>(time_behind).count()));
+ continue;
+ }
+
+ int64_t in_pts_lower, in_pts_upper;
+
+ // Find the frame immediately before and after this point.
{
lock_guard<mutex> lock(frame_mu);
- auto it = upper_bound(frames[stream_idx].begin(),
+
+			// Find the first frame such that frame.pts >= in_pts.
+ auto it = lower_bound(frames[stream_idx].begin(),
frames[stream_idx].end(),
- next_pts);
+ in_pts);
if (it == frames[stream_idx].end() || *it >= clip.pts_out) {
break;
}
- next_pts = *it;
- }
+ in_pts_upper = *it;
- // FIXME: assumes a given timebase.
- double speed = 0.5;
- steady_clock::time_point next_frame_start =
- origin + microseconds((next_pts - pts_origin) * int(1000000 / speed) / 12800);
+			// Find the last frame such that frame.pts < in_pts (if any).
+ if (it == frames[stream_idx].begin()) {
+ in_pts_lower = *it;
+ } else {
+ in_pts_lower = *(it - 1);
+ }
+ }
+ assert(in_pts >= in_pts_lower);
+ assert(in_pts <= in_pts_upper);
// Sleep until the next frame start, or until there's a new clip we're supposed to play.
{
}
}
- destination->setFrame(stream_idx, next_pts);
-
- // Send the frame to the stream.
- // FIXME: Vaguely less crazy pts, perhaps.
- double pts_float = fmod(duration<double>(next_frame_start.time_since_epoch()).count(), 86400.0f);
- int64_t pts = lrint(pts_float * TIMEBASE);
- string jpeg = read_file(filename_for_frame(stream_idx, next_pts));
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.stream_index = 0;
- pkt.data = (uint8_t *)jpeg.data();
- pkt.size = jpeg.size();
- stream_mux->add_packet(pkt, pts, pts);
+ if (progress_callback != nullptr) {
+ // NOTE: None of this will take into account any snapping done below.
+ double played_this_clip = double(in_pts - clip.pts_in) / TIMEBASE / speed;
+ double total_length = double(clip.pts_out - clip.pts_in) / TIMEBASE / speed;
+ progress_callback(played_this_clip, total_length);
+ }
+
+ if (in_pts_lower == in_pts_upper) {
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+ if (video_stream != nullptr) {
+ video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_lower);
+ }
+ continue;
+ }
+
+ // Snap to input frame: If we can do so with less than 1% jitter
+ // (ie., move less than 1% of an _output_ frame), do so.
+ double in_pts_lower_as_frameno = (in_pts_lower - in_pts_origin) * output_framerate / TIMEBASE / speed;
+ double in_pts_upper_as_frameno = (in_pts_upper - in_pts_origin) * output_framerate / TIMEBASE / speed;
+ if (fabs(in_pts_lower_as_frameno - frameno) < 0.01) {
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+ if (video_stream != nullptr) {
+ video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_lower);
+ }
+ in_pts_origin += in_pts_lower - in_pts;
+ continue;
+ } else if (fabs(in_pts_upper_as_frameno - frameno) < 0.01) {
+ destination->setFrame(stream_idx, in_pts_upper, /*interpolated=*/false);
+ if (video_stream != nullptr) {
+ video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_upper);
+ }
+ in_pts_origin += in_pts_upper - in_pts;
+ continue;
+ }
+
+ if (time_behind >= milliseconds(100)) {
+ fprintf(stderr, "WARNING: %ld ms behind, dropping an interpolated frame.\n",
+ lrint(1e3 * duration<double>(time_behind).count()));
+ continue;
+ }
+
+ double alpha = double(in_pts - in_pts_lower) / (in_pts_upper - in_pts_lower);
+
+ if (video_stream == nullptr) {
+ // Previews don't do any interpolation.
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+ } else {
+ // Calculate the interpolated frame. When it's done, the destination
+ // will be unblocked.
+ destination->setFrame(stream_idx, lrint(out_pts), /*interpolated=*/true);
+ video_stream->schedule_interpolated_frame(lrint(out_pts), stream_idx, in_pts_lower, in_pts_upper, alpha);
+ }
}
{
}
}
-Player::Player(JPEGFrameView *destination)
+Player::Player(JPEGFrameView *destination, bool also_output_to_stream)
: destination(destination)
{
- open_output_stream();
- thread(&Player::thread_func, this).detach();
+ thread(&Player::thread_func, this, also_output_to_stream).detach();
}
void Player::play_clip(const Clip &clip, unsigned stream_idx)
if (it == frames[stream_idx].end()) {
return;
}
- destination->setFrame(stream_idx, *it);
-}
-
-void Player::open_output_stream()
-{
- AVFormatContext *avctx = avformat_alloc_context();
- avctx->oformat = av_guess_format("nut", nullptr, nullptr);
-
- uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
- avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
- avctx->pb->write_data_type = &Player::write_packet2_thunk;
- avctx->pb->ignore_boundary_point = 1;
-
- Mux::Codec video_codec = Mux::CODEC_MJPEG;
-
- avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
- string video_extradata;
-
- constexpr int width = 1280, height = 720; // Doesn't matter for MJPEG.
- stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
- /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
-}
-
-int Player::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
-{
- Player *player = (Player *)opaque;
- return player->write_packet2(buf, buf_size, type, time);
-}
-
-int Player::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
-{
- if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
- seen_sync_markers = true;
- } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
- // We don't know if this is a keyframe or not (the muxer could
- // avoid marking it), so we just have to make the best of it.
- type = AVIO_DATA_MARKER_SYNC_POINT;
- }
-
- if (type == AVIO_DATA_MARKER_HEADER) {
- stream_mux_header.append((char *)buf, buf_size);
- global_httpd->set_header(stream_mux_header);
- } else {
- global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
- }
- return buf_size;
+ destination->setFrame(stream_idx, *it, /*interpolated=*/false);
}