#include <jpeglib.h>
#include <stdint.h>
+#include <unistd.h>
#include <atomic>
#include <condition_variable>
using namespace movit;
using namespace std;
-bool operator< (const JPEGID &a, const JPEGID &b) {
- return make_pair(a.stream_idx, a.pts) < make_pair(b.stream_idx, b.pts);
-}
+// Just an arbitrary order for std::map.
+struct JPEGIDLexicalOrder
+{
+ bool operator() (const JPEGID &a, const JPEGID &b) const
+ {
+ if (a.stream_idx != b.stream_idx)
+ return a.stream_idx < b.stream_idx;
+ if (a.pts != b.pts)
+ return a.pts < b.pts;
+ return a.interpolated < b.interpolated;
+ }
+};
// A cache entry: the decoded (or interpolated) frame, plus an LRU tick
// for eviction ordering.
struct LRUFrame {
	shared_ptr<Frame> frame;
	size_t last_used;  // Tick taken from event_counter; smallest = least recently used.
};
mutex cache_mu;
-map<JPEGID, LRUFrame> cache; // Under cache_mu.
-condition_variable any_pending_decodes;
+map<JPEGID, LRUFrame, JPEGIDLexicalOrder> cache; // Under cache_mu.
+condition_variable any_pending_decodes, cache_updated;
deque<pair<JPEGID, JPEGFrameView *>> pending_decodes; // Under cache_mu.
atomic<size_t> event_counter{0};
extern QGLWidget *global_share_widget;
return nullptr;
}
+ assert(!id.interpolated);
*did_decode = true;
shared_ptr<Frame> frame = decode_jpeg(filename_for_frame(id.stream_idx, id.pts));
}
bool found_in_cache;
- shared_ptr<Frame> frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
+ shared_ptr<Frame> frame;
+ if (id.interpolated) {
+ // Interpolated frames are never decoded by us,
+ // put directly into the cache from VideoStream.
+ unique_lock<mutex> lock(cache_mu);
+ cache_updated.wait(lock, [id] {
+ return cache.count(id) != 0;
+ });
+ found_in_cache = true; // Don't count it as a decode.
+
+ auto it = cache.find(id);
+ assert(it != cache.end());
+
+ it->second.last_used = event_counter++;
+ frame = it->second.frame;
+ if (frame == nullptr) {
+ // We inserted a nullptr as signal that the frame was never
+ // interpolated and that we should stop waiting.
+ // But don't let it linger in the cache anymore.
+ cache.erase(it);
+ }
+ } else {
+ frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
+ }
if (frame == nullptr) {
- assert(cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
+ assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
++num_dropped;
continue;
}
}
}
+ // TODO: Could we get jitter between non-interpolated and interpolated frames here?
dest->setDecodedFrame(frame);
}
}
: QGLWidget(parent, global_share_widget) {
}
-void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts)
+void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated)
{
current_stream_idx = stream_idx;
unique_lock<mutex> lock(cache_mu);
- pending_decodes.emplace_back(JPEGID{ stream_idx, pts }, this);
+ pending_decodes.emplace_back(JPEGID{ stream_idx, pts, interpolated }, this);
any_pending_decodes.notify_all();
}
+void JPEGFrameView::insert_interpolated_frame(unsigned stream_idx, int64_t pts, shared_ptr<Frame> frame)
+{
+ JPEGID id{ stream_idx, pts, true };
+
+ // We rely on the frame not being evicted from the cache before
+ // jpeg_decoder_thread() sees it and can display it (otherwise,
+ // that thread would hang). With a default cache of 1000 elements,
+ // that would sound like a reasonable assumption.
+ unique_lock<mutex> lock(cache_mu);
+ cache[id] = LRUFrame{ std::move(frame), event_counter++ };
+ cache_updated.notify_all();
+}
+
ResourcePool *resource_pool = nullptr;
void JPEGFrameView::initializeGL()
// Key identifying a frame in the decode cache.
struct JPEGID {
	unsigned stream_idx;
	int64_t pts;
	// True if this frame is produced by VideoStream's interpolation
	// (inserted via insert_interpolated_frame) rather than decoded from
	// a JPEG file; pts is then the output pts, not an input pts.
	bool interpolated;
};
struct Frame {
std::unique_ptr<uint8_t[]> y, cb, cr;
public:
JPEGFrameView(QWidget *parent);
- void setFrame(unsigned stream_idx, int64_t pts);
+ void setFrame(unsigned stream_idx, int64_t pts, bool interpolated);
+ static void insert_interpolated_frame(unsigned stream_idx, int64_t pts, std::shared_ptr<Frame> frame);
void mousePressEvent(QMouseEvent *event) override;
void setDecodedFrame(std::shared_ptr<Frame> frame);
+
signals:
void clicked();
post_to_main_thread([pkt] {
if (pkt.stream_index == 0) {
- global_mainwindow->ui->input1_display->setFrame(pkt.stream_index, pkt.pts);
+ global_mainwindow->ui->input1_display->setFrame(pkt.stream_index, pkt.pts, /*interpolated=*/false);
} else if (pkt.stream_index == 1) {
- global_mainwindow->ui->input2_display->setFrame(pkt.stream_index, pkt.pts);
+ global_mainwindow->ui->input2_display->setFrame(pkt.stream_index, pkt.pts, /*interpolated=*/false);
} else if (pkt.stream_index == 2) {
- global_mainwindow->ui->input3_display->setFrame(pkt.stream_index, pkt.pts);
+ global_mainwindow->ui->input3_display->setFrame(pkt.stream_index, pkt.pts, /*interpolated=*/false);
} else if (pkt.stream_index == 3) {
- global_mainwindow->ui->input4_display->setFrame(pkt.stream_index, pkt.pts);
+ global_mainwindow->ui->input4_display->setFrame(pkt.stream_index, pkt.pts, /*interpolated=*/false);
}
});
}
if (in_pts_lower == in_pts_upper) {
- destination->setFrame(stream_idx, in_pts_lower);
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
if (video_stream != nullptr) {
video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_lower);
}
double in_pts_lower_as_frameno = (in_pts_lower - in_pts_origin) * output_framerate / TIMEBASE / speed;
double in_pts_upper_as_frameno = (in_pts_upper - in_pts_origin) * output_framerate / TIMEBASE / speed;
if (fabs(in_pts_lower_as_frameno - frameno) < 0.01) {
- destination->setFrame(stream_idx, in_pts_lower);
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
if (video_stream != nullptr) {
video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_lower);
}
in_pts_origin += in_pts_lower - in_pts;
continue;
} else if (fabs(in_pts_upper_as_frameno - frameno) < 0.01) {
- destination->setFrame(stream_idx, in_pts_upper);
+ destination->setFrame(stream_idx, in_pts_upper, /*interpolated=*/false);
if (video_stream != nullptr) {
video_stream->schedule_original_frame(lrint(out_pts), stream_idx, in_pts_upper);
}
}
double alpha = double(in_pts - in_pts_lower) / (in_pts_upper - in_pts_lower);
- destination->setFrame(stream_idx, in_pts_lower); // FIXME
- if (video_stream != nullptr) {
- // Send the frame to the stream.
+ if (video_stream == nullptr) {
+ // Previews don't do any interpolation.
+ destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+ } else {
+ // Calculate the interpolated frame. When it's done, the destination
+ // will be unblocked.
+ destination->setFrame(stream_idx, lrint(out_pts), /*interpolated=*/true);
video_stream->schedule_interpolated_frame(lrint(out_pts), stream_idx, in_pts_lower, in_pts_upper, alpha);
}
}
if (it == frames[stream_idx].end()) {
return;
}
- destination->setFrame(stream_idx, *it);
+ destination->setFrame(stream_idx, *it, /*interpolated=*/false);
}
unique_lock<mutex> lock(queue_lock);
if (interpolate_resources.empty()) {
fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+ JPEGFrameView::insert_interpolated_frame(stream_idx, output_pts, nullptr);
return;
}
resources = interpolate_resources.front();
JPEGID jpeg_id;
jpeg_id.stream_idx = stream_idx;
jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
+ jpeg_id.interpolated = false;
bool did_decode;
shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
} else if (qf.type == QueuedFrame::INTERPOLATED) {
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
- vector<uint8_t> jpeg = encode_jpeg(
- (const uint8_t *)qf.resources.pbo_contents,
- (const uint8_t *)qf.resources.pbo_contents + 1280 * 720,
- (const uint8_t *)qf.resources.pbo_contents + 1280 * 720 + 640 * 720,
- 1280, 720);
+ const uint8_t *y = (const uint8_t *)qf.resources.pbo_contents;
+ const uint8_t *cb = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720;
+ const uint8_t *cr = (const uint8_t *)qf.resources.pbo_contents + 1280 * 720 + 640 * 720;
+
+ // Send a copy of the frame on to display.
+ shared_ptr<Frame> frame(new Frame);
+ frame->y.reset(new uint8_t[1280 * 720]);
+ frame->cb.reset(new uint8_t[640 * 720]);
+ frame->cr.reset(new uint8_t[640 * 720]);
+ for (unsigned yy = 0; yy < 720; ++yy) {
+ memcpy(frame->y.get() + 1280 * yy, y + 1280 * (719 - yy), 1280);
+ memcpy(frame->cb.get() + 640 * yy, cb + 640 * (719 - yy), 640);
+ memcpy(frame->cr.get() + 640 * yy, cr + 640 * (719 - yy), 640);
+ }
+ frame->width = 1280;
+ frame->height = 720;
+ frame->chroma_subsampling_x = 2;
+ frame->chroma_subsampling_y = 1;
+ frame->pitch_y = 1280;
+ frame->pitch_chroma = 640;
+ JPEGFrameView::insert_interpolated_frame(qf.stream_idx, qf.output_pts, std::move(frame));
+
+ // Now JPEG encode it, and send it on to the stream.
+ vector<uint8_t> jpeg = encode_jpeg(y, cb, cr, 1280, 720);
compute_flow->release_texture(qf.flow_tex);
interpolate->release_texture(qf.output_tex);
interpolate->release_texture(qf.cbcr_tex);