+ // Handle one demuxed packet: raw audio payloads are stashed in
+ // pending_audio[] under the video stream they belong to; video packets
+ // are rebased onto our own timebase, written to disk, and announced to
+ // the main window.
+ AVStream *stream = format_ctx->streams[pkt.stream_index];
+ if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
+ audio_stream_to_video_stream_idx.count(pkt.stream_index)) {
+ // Payload must be a whole number of 2x32-bit units (presumably
+ // interleaved stereo 32-bit samples — TODO confirm against the sender).
+ if ((pkt.size % (sizeof(uint32_t) * 2)) != 0) {
+ fprintf(stderr, "Audio stream %u had a packet of strange length %d, ignoring.\n",
+ pkt.stream_index, pkt.size);
+ } else {
+ // TODO: Endianness?
+ const uint32_t *begin = (const uint32_t *)pkt.data;
+ const uint32_t *end = (const uint32_t *)(pkt.data + pkt.size);
+ // Keyed by the paired *video* stream index, so the video path below
+ // can attach this audio when the matching frame arrives.
+ pending_audio[audio_stream_to_video_stream_idx[pkt.stream_index]].assign(begin, end);
+ }
+ }
+
+ // Only video packets within our stream-array bounds continue past here;
+ // audio (already handled above), data/subtitle streams, and out-of-range
+ // indices are skipped.
+ if (pkt.stream_index >= MAX_STREAMS ||
+ stream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
+ continue;
+ }
+
+ ++metric_received_frames[pkt.stream_index];
+ metric_received_frame_size_bytes.count_event(pkt.size);
+
+ // Convert pts to our own timebase.
+ AVRational stream_timebase = stream->time_base;
+ int64_t pts = av_rescale_q(pkt.pts, stream_timebase, AVRational{ 1, TIMEBASE });
+
+ // Translate offset into our stream.
+ // The first frame pins pts_offset so our timeline starts at start_pts;
+ // the max() keeps later (possibly backdated) packets from landing
+ // before start_pts.
+ if (last_pts == -1) {
+ pts_offset = start_pts - pts;
+ }
+ pts = std::max(pts + pts_offset, start_pts);
+
+ //fprintf(stderr, "Got a frame from camera %d, pts = %ld, size = %d\n",
+ // pkt.stream_index, pts, pkt.size);
+ // move() leaves pending_audio for this stream empty, so any stored audio
+ // is attached to at most one frame.
+ FrameOnDisk frame = write_frame(pkt.stream_index, pts, pkt.data, pkt.size, move(pending_audio[pkt.stream_index]), &db);
+
+ // NOTE(review): the lambda copies the AVPacket struct by value without
+ // av_packet_ref(), so pkt.data may be freed/reused before it runs on the
+ // main thread. This is only safe if display_frame() reads nothing but
+ // stream_index — confirm, or capture just the index instead.
+ post_to_main_thread([pkt, frame] {
+ global_mainwindow->display_frame(pkt.stream_index, frame);
+ });
+
+ // Optionally pace playback at roughly real time by sleeping for the
+ // inter-frame pts delta (skipped for the very first frame, when there
+ // is no previous pts to diff against).
+ if (last_pts != -1 && global_flags.slow_down_input) {
+ this_thread::sleep_for(microseconds((pts - last_pts) * 1000000 / TIMEBASE));
+ }
+ last_pts = pts;
+ current_pts = pts;