#include "context.h"
#include "defs.h"
+#include "flags.h"
#include "httpd.h"
#include "timebase.h"
#define BITSTREAM_ALLOCATE_STEPPING 4096
#define SURFACE_NUM 16 /* 16 surfaces for source YUV */
+#define MAX_NUM_REF1 16 // The H.264 limit on reference frames (DPB size); not related to SURFACE_NUM.
+#define MAX_NUM_REF2 32 // Matches the 32-entry RefPicList arrays in VAEncSliceParameterBufferH264; not related to SURFACE_NUM.
static constexpr unsigned int MaxFrameNum = (2<<16);
static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
using namespace std;
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
+//
+// The class also deals with keeping a freelist as needed.
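+//
+// A worked example with two B-frames (queue_length == 2): inserting
+// pts 0 returns nothing (the buffer is filling up); inserting 3 returns 0;
+// inserting 1 returns 1; inserting 2 returns 2; inserting 6 returns 3;
+// and so on. Whatever is left at the end is drained with get_first_frame().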
+class FrameReorderer {
+public:
+ FrameReorderer(unsigned queue_length, int width, int height);
+
+ // Inserts the given frame and returns the next frame to output with its pts, if any. Otherwise -1 and nullptr.
+ // Does _not_ take ownership of data; a copy is taken if needed.
+ // The returned pointer is valid until the next call to reorder_frame, or destruction.
+ // As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
+ pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+ // The same as reorder_frame, but without inserting anything. Used to empty the queue.
+ pair<int64_t, const uint8_t *> get_first_frame();
+
+ bool empty() const { return frames.empty(); }
+
+private:
+ unsigned queue_length;
+ int width, height;
+
+ priority_queue<pair<int64_t, uint8_t *>> frames;
+ stack<uint8_t *> freelist; // Includes the last value returned from reorder_frame.
+
+ // Owns all the pointers. Normally, freelist and frames could do this themselves,
+ // except priority_queue doesn't work well with movable-only types.
+ vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+ : queue_length(queue_length), width(width), height(height)
+{
+ for (unsigned i = 0; i < queue_length; ++i) {
+ owner.emplace_back(new uint8_t[width * height * 2]);
+ freelist.push(owner.back().get());
+ }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+ if (queue_length == 0) {
+ return make_pair(pts, data);
+ }
+
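+ // The freelist can never be empty here: queue_length buffers were allocated,
+ // and at most queue_length - 1 frames remain queued between calls.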
+ assert(!freelist.empty());
+ uint8_t *storage = freelist.top();
+ freelist.pop();
+ memcpy(storage, data, width * height * 2);
+ frames.emplace(-pts, storage); // Invert pts; priority_queue is a max-heap, so the lowest pts comes out first.
+
+ if (frames.size() >= queue_length) {
+ return get_first_frame();
+ } else {
+ return make_pair(-1, nullptr);
+ }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+ assert(!frames.empty());
+ pair<int64_t, uint8_t *> storage = frames.top();
+ frames.pop();
+ int64_t pts = storage.first;
+ freelist.push(storage.second);
+ return make_pair(-pts, storage.second); // Re-invert pts (see reorder_frame()).
+}
+
class H264EncoderImpl {
public:
H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
~H264EncoderImpl();
void add_audio(int64_t pts, vector<float> audio);
bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
- void end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames);
+ RefCountedGLsync end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames);
void shutdown();
private:
int64_t pts;
};
+ // So we never get negative dts.
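+ // dts lags pts by the B-frame reorder depth: e.g., with ip_period == 3
+ // (two B-frames between each I/P frame) and MAX_FPS == 60, the delay is
+ // 2 * (TIMEBASE / 60), i.e., two frame durations.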
+ int64_t global_delay() const {
+ return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+ }
+
void encode_thread_func();
void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+ void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
int frame_type, int64_t pts, int64_t dts);
void storage_task_thread();
+ void encode_audio(const vector<float> &audio,
+ int64_t audio_pts,
+ AVCodecContext *ctx,
+ HTTPD::PacketDestination destination);
void storage_task_enqueue(storage_task task);
void save_codeddata(storage_task task);
int render_packedsequence();
int build_packed_slice_buffer(unsigned char **header_buffer);
int init_va(const string &va_display);
int deinit_va();
+ void enable_zerocopy_if_possible();
VADisplay va_open_display(const string &va_display);
void va_close_display(VADisplay va_dpy);
int setup_encode();
QSurface *surface;
AVCodecContext *context_audio;
+ AVFrame *audio_frame = nullptr;
HTTPD *httpd;
+ unique_ptr<FrameReorderer> reorderer;
Display *x11_display = nullptr;
VAEncPictureParameterBufferH264 pic_param;
VAEncSliceParameterBufferH264 slice_param;
VAPictureH264 CurrentCurrPic;
- VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
+ VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
// Static quality settings.
static constexpr unsigned int frame_bitrate = 15000000 / 60; // Doesn't really matter; only initial_qp does.
}
}
+void H264EncoderImpl::enable_zerocopy_if_possible()
+{
+ if (global_flags.uncompressed_video_to_http) {
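+ // The uncompressed path reads the frame back through surf->y_ptr,
+ // which is only available on the non-zerocopy upload path.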
+ fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+ use_zerocopy = false;
+ } else {
+ use_zerocopy = true;
+ }
+}
+
VADisplay H264EncoderImpl::va_open_display(const string &va_display)
{
if (va_display.empty()) {
fprintf(stderr, "error: can't connect to X server!\n");
return NULL;
}
- use_zerocopy = true;
+ enable_zerocopy_if_possible();
return vaGetDisplay(x11_display);
} else if (va_display[0] != '/') {
x11_display = XOpenDisplay(va_display.c_str());
fprintf(stderr, "error: can't connect to X server!\n");
return NULL;
}
- use_zerocopy = true;
+ enable_zerocopy_if_possible();
return vaGetDisplay(x11_display);
} else {
drm_fd = open(va_display.c_str(), O_RDWR);
if (support_encode == 0) {
printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
- printf("but have one in your system, try launching Nageru with --va-display /dev/dri/card0\n");
+ printf("but have one in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
printf("to use VA-API against DRM instead of X11.\n");
exit(1);
} else {
CurrentCurrPic = pic_param.CurrPic;
memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
- for (i = numShortTerm; i < SURFACE_NUM; i++) {
+ for (i = numShortTerm; i < MAX_NUM_REF1; i++) {
pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
}
int refpiclist0_max = h264_maxref & 0xffff;
memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
- for (i = refpiclist0_max; i < 32; i++) {
+ for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
}
int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264));
- for (i = refpiclist0_max; i < 32; i++) {
+ for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
}
memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264));
- for (i = refpiclist1_max; i < 32; i++) {
+ for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) {
slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE;
slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
}
void H264EncoderImpl::save_codeddata(storage_task task)
{
- VACodedBufferSegment *buf_list = NULL;
- VAStatus va_status;
+ VACodedBufferSegment *buf_list = NULL;
+ VAStatus va_status;
- string data;
+ string data;
- const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS); // So we never get negative dts.
+ va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+ CHECK_VASTATUS(va_status, "vaMapBuffer");
+ while (buf_list != NULL) {
+ data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
+ buf_list = (VACodedBufferSegment *) buf_list->next;
+ }
+ vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
- va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
- CHECK_VASTATUS(va_status, "vaMapBuffer");
- while (buf_list != NULL) {
- data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
- buf_list = (VACodedBufferSegment *) buf_list->next;
- }
- vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
-
- {
- // Add video.
- AVPacket pkt;
- memset(&pkt, 0, sizeof(pkt));
- pkt.buf = nullptr;
- pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
- pkt.size = data.size();
- pkt.stream_index = 0;
- if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
- pkt.flags = AV_PKT_FLAG_KEY;
- } else {
- pkt.flags = 0;
- }
- //pkt.duration = 1;
- httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
- }
- // Encode and add all audio frames up to and including the pts of this video frame.
- for ( ;; ) {
- int64_t audio_pts;
- vector<float> audio;
- {
- unique_lock<mutex> lock(frame_queue_mutex);
- frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
- if (storage_thread_should_quit && pending_audio_frames.empty()) return;
- auto it = pending_audio_frames.begin();
- if (it->first > task.pts) break;
- audio_pts = it->first;
- audio = move(it->second);
- pending_audio_frames.erase(it);
- }
+ {
+ // Add video.
+ AVPacket pkt;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.buf = nullptr;
+ pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+ pkt.size = data.size();
+ pkt.stream_index = 0;
+ if (task.frame_type == FRAME_IDR) {
+ pkt.flags = AV_PKT_FLAG_KEY;
+ } else {
+ pkt.flags = 0;
+ }
+ //pkt.duration = 1;
+ httpd->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay(),
+ global_flags.uncompressed_video_to_http ? HTTPD::DESTINATION_FILE_ONLY : HTTPD::DESTINATION_FILE_AND_HTTP);
+ }
+ // Encode and add all audio frames up to and including the pts of this video frame.
+ for ( ;; ) {
+ int64_t audio_pts;
+ vector<float> audio;
+ {
+ unique_lock<mutex> lock(frame_queue_mutex);
+ frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
+ if (storage_thread_should_quit && pending_audio_frames.empty()) return;
+ auto it = pending_audio_frames.begin();
+ if (it->first > task.pts) break;
+ audio_pts = it->first;
+ audio = move(it->second);
+ pending_audio_frames.erase(it);
+ }
- AVFrame *frame = avcodec_alloc_frame();
- frame->nb_samples = audio.size() / 2;
- frame->format = AV_SAMPLE_FMT_S32;
- frame->channel_layout = AV_CH_LAYOUT_STEREO;
+ encode_audio(audio, audio_pts, context_audio, HTTPD::DESTINATION_FILE_AND_HTTP);
- unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
- int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
- if (ret < 0) {
- fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
- exit(1);
- }
- for (int i = 0; i < frame->nb_samples * 2; ++i) {
- if (audio[i] >= 1.0f) {
- int_samples[i] = 2147483647;
- } else if (audio[i] <= -1.0f) {
- int_samples[i] = -2147483647;
- } else {
- int_samples[i] = lrintf(audio[i] * 2147483647.0f);
- }
- }
+ if (audio_pts == task.pts) break;
+ }
+}
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.data = nullptr;
- pkt.size = 0;
- int got_output;
- avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
- if (got_output) {
- pkt.stream_index = 1;
- httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
- }
- // TODO: Delayed frames.
- avcodec_free_frame(&frame);
- av_free_packet(&pkt);
- if (audio_pts == task.pts) break;
- }
+void H264EncoderImpl::encode_audio(
+ const vector<float> &audio,
+ int64_t audio_pts,
+ AVCodecContext *ctx,
+ HTTPD::PacketDestination destination)
+{
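+ // The input is interleaved stereo floats, so there are audio.size() / 2 samples per channel.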
+ audio_frame->nb_samples = audio.size() / 2;
+ audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
+
+ unique_ptr<float[]> planar_samples;
+ unique_ptr<int32_t[]> int_samples;
+
+ if (ctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
+ audio_frame->format = AV_SAMPLE_FMT_FLTP;
+ planar_samples.reset(new float[audio.size()]);
+ avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), audio.size() * sizeof(float), 0);
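+ // Deinterleave into planar format: all left samples first, then all right.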
+ for (int i = 0; i < audio_frame->nb_samples; ++i) {
+ planar_samples[i] = audio[i * 2 + 0];
+ planar_samples[i + audio_frame->nb_samples] = audio[i * 2 + 1];
+ }
+ } else {
+ assert(ctx->sample_fmt == AV_SAMPLE_FMT_S32);
+ int_samples.reset(new int32_t[audio.size()]);
+ int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+ if (ret < 0) {
+ fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
+ exit(1);
+ }
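+ // Convert the floats to signed 32-bit, clamping so that +/-1.0 maps to
+ // the full (symmetric) int32 range.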
+ for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
+ if (audio[i] >= 1.0f) {
+ int_samples[i] = 2147483647;
+ } else if (audio[i] <= -1.0f) {
+ int_samples[i] = -2147483647;
+ } else {
+ int_samples[i] = lrintf(audio[i] * 2147483647.0f);
+ }
+ }
+ }
-#if 0
- printf("\r "); /* return back to startpoint */
- switch (encode_order % 4) {
- case 0:
- printf("|");
- break;
- case 1:
- printf("/");
- break;
- case 2:
- printf("-");
- break;
- case 3:
- printf("\\");
- break;
- }
- printf("%08lld", encode_order);
-#endif
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.data = nullptr;
+ pkt.size = 0;
+ int got_output = 0;
+ avcodec_encode_audio2(ctx, &pkt, audio_frame, &got_output);
+ if (got_output) {
+ pkt.stream_index = 1;
+ pkt.flags = AV_PKT_FLAG_KEY;
+ httpd->add_packet(pkt, audio_pts + global_delay(), audio_pts + global_delay(), destination);
+ }
+ // TODO: Delayed frames.
+ av_frame_unref(audio_frame);
+ av_free_packet(&pkt);
}
-
// Puts a new frame onto the storage task queue, to be picked up by storage_task_thread().
void H264EncoderImpl::storage_task_enqueue(storage_task task)
{
unique_lock<mutex> lock(storage_task_queue_mutex);
storage_task_queue.push(move(task));
- srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
storage_task_queue_changed.notify_all();
}
return 0;
}
+namespace {
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
- : current_storage_frame(0), surface(surface), httpd(httpd)
+void init_audio_encoder(const string &codec_name, int bit_rate, AVCodecContext **ctx)
{
- AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
- context_audio = avcodec_alloc_context3(codec_audio);
- context_audio->bit_rate = AUDIO_OUTPUT_BIT_RATE;
+ AVCodec *codec_audio = avcodec_find_encoder_by_name(codec_name.c_str());
+ if (codec_audio == nullptr) {
+ fprintf(stderr, "ERROR: Could not find codec '%s'\n", codec_name.c_str());
+ exit(1);
+ }
+
+ AVCodecContext *context_audio = avcodec_alloc_context3(codec_audio);
+ context_audio->bit_rate = bit_rate;
context_audio->sample_rate = OUTPUT_FREQUENCY;
- context_audio->sample_fmt = AUDIO_OUTPUT_SAMPLE_FMT;
+
+ // Choose sample format; we currently only support these two
+ // (see encode_audio), so we're a bit picky.
+ const AVSampleFormat *ptr = codec_audio->sample_fmts;
+ for ( ; *ptr != AV_SAMPLE_FMT_NONE; ++ptr) {
+ if (*ptr == AV_SAMPLE_FMT_FLTP || *ptr == AV_SAMPLE_FMT_S32) {
+ context_audio->sample_fmt = *ptr;
+ break;
+ }
+ }
+ if (*ptr == AV_SAMPLE_FMT_NONE) {
+ fprintf(stderr, "ERROR: Audio codec does not support fltp or s32 sample formats\n");
+ exit(1);
+ }
+
context_audio->channels = 2;
context_audio->channel_layout = AV_CH_LAYOUT_STEREO;
context_audio->time_base = AVRational{1, TIMEBASE};
if (avcodec_open2(context_audio, codec_audio, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
+ fprintf(stderr, "Could not open codec '%s'\n", codec_name.c_str());
exit(1);
}
+ *ctx = context_audio;
+}
+
+} // namespace
+
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+ : current_storage_frame(0), surface(surface), httpd(httpd)
+{
+ init_audio_encoder(AUDIO_OUTPUT_CODEC_NAME, AUDIO_OUTPUT_BIT_RATE, &context_audio);
+
+ audio_frame = av_frame_alloc();
+
frame_width = width;
frame_height = height;
frame_width_mbaligned = (frame_width + 15) & (~15);
//print_input();
+ if (global_flags.uncompressed_video_to_http) {
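+ // The reorder depth is the number of B-frames, i.e., ip_period - 1.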
+ reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+ }
+
init_va(va_display);
setup_encode();
H264EncoderImpl::~H264EncoderImpl()
{
shutdown();
+ av_frame_free(&audio_frame);
+
+ // TODO: Destroy context.
}
bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
{
// Wait until this frame slot is done encoding.
unique_lock<mutex> lock(storage_task_queue_mutex);
+ if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+ fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+ current_storage_frame % SURFACE_NUM, current_storage_frame);
+ }
storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+ srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
if (storage_thread_should_quit) return false;
}
frame_queue_nonempty.notify_all();
}
-void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264EncoderImpl::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
{
assert(!is_shutdown);
glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
check_error();
- fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
- check_error();
}
+ RefCountedGLsync fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ check_error();
+ glFlush(); // Make the H.264 thread see the fence as soon as possible.
+ check_error();
+
{
unique_lock<mutex> lock(frame_queue_mutex);
pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
++current_storage_frame;
}
frame_queue_nonempty.notify_all();
+ return fence;
}
void H264EncoderImpl::shutdown()
encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
last_dts = dts;
}
+
+ if (global_flags.uncompressed_video_to_http) {
+ // Add frames left in reorderer.
+ while (!reorderer->empty()) {
+ pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+ add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+ }
+ }
+}
+
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
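+ // <data> is borrowed from the reorderer and is only valid until the next
+ // reorder_frame() call, so httpd->add_packet() must be done with it by the
+ // time it returns.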
+ AVPacket pkt;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.buf = nullptr;
+ pkt.data = const_cast<uint8_t *>(data);
+ pkt.size = frame_width * frame_height * 2;
+ pkt.stream_index = 0;
+ pkt.flags = AV_PKT_FLAG_KEY;
+ httpd->add_packet(pkt, pts, pts, HTTPD::DESTINATION_HTTP_ONLY);
}
namespace {
va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+ if (global_flags.uncompressed_video_to_http) {
+ // Add uncompressed video. (Note that pts == dts here.)
+ // Delay needs to match audio.
+ pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay(), reinterpret_cast<uint8_t *>(surf->y_ptr));
+ if (output_frame.second != nullptr) {
+ add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+ }
+ }
}
va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
return impl->begin_frame(y_tex, cbcr_tex);
}
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync H264Encoder::end_frame(int64_t pts, const vector<RefCountedFrame> &input_frames)
{
- impl->end_frame(fence, pts, input_frames);
+ return impl->end_frame(pts, input_frames);
}
void H264Encoder::shutdown()