exit(1);
}
- BMDDisplayModeFlags flags = display_mode->GetFlags();
- if ((flags & bmdDisplayModeColorspaceRec601) && global_flags.ycbcr_rec709_coefficients) {
- fprintf(stderr, "WARNING: Chosen output mode expects Rec. 601 Y'CbCr coefficients.\n");
- fprintf(stderr, " Consider --output-ycbcr-coefficients=rec601 (or =auto).\n");
- } else if ((flags & bmdDisplayModeColorspaceRec709) && !global_flags.ycbcr_rec709_coefficients) {
- fprintf(stderr, "WARNING: Chosen output mode expects Rec. 709 Y'CbCr coefficients.\n");
- fprintf(stderr, " Consider --output-ycbcr-coefficients=rec709 (or =auto).\n");
- }
+ current_mode_flags = display_mode->GetFlags();
BMDTimeValue time_value;
BMDTimeScale time_scale;
}
}
-void DeckLinkOutput::send_frame(GLuint y_tex, GLuint cbcr_tex, const vector<RefCountedFrame> &input_frames, int64_t pts, int64_t duration)
+void DeckLinkOutput::send_frame(GLuint y_tex, GLuint cbcr_tex, YCbCrLumaCoefficients output_ycbcr_coefficients, const vector<RefCountedFrame> &input_frames, int64_t pts, int64_t duration)
{
assert(!should_quit);
+ if ((current_mode_flags & bmdDisplayModeColorspaceRec601) && output_ycbcr_coefficients == YCBCR_REC_709) {
+ if (!last_frame_had_mode_mismatch) {
+ fprintf(stderr, "WARNING: Chosen output mode expects Rec. 601 Y'CbCr coefficients.\n");
+ fprintf(stderr, " Consider --output-ycbcr-coefficients=rec601 (or =auto).\n");
+ }
+ last_frame_had_mode_mismatch = true;
+ } else if ((current_mode_flags & bmdDisplayModeColorspaceRec709) && output_ycbcr_coefficients == YCBCR_REC_601) {
+ if (!last_frame_had_mode_mismatch) {
+ fprintf(stderr, "WARNING: Chosen output mode expects Rec. 709 Y'CbCr coefficients.\n");
+ fprintf(stderr, " Consider --output-ycbcr-coefficients=rec709 (or =auto).\n");
+ }
+ last_frame_had_mode_mismatch = true;
+ } else {
+ last_frame_had_mode_mismatch = false;
+ }
+
unique_ptr<Frame> frame = move(get_frame());
chroma_subsampler->create_uyvy(y_tex, cbcr_tex, width, height, frame->uyvy_tex);
return best_mode;
}
+YCbCrLumaCoefficients DeckLinkOutput::preferred_ycbcr_coefficients() const
+{
+ if (current_mode_flags & bmdDisplayModeColorspaceRec601) {
+ return YCBCR_REC_601;
+ } else {
+ // Don't bother checking bmdDisplayModeColorspaceRec709;
+ // if none is set, 709 is a good default anyway.
+ return YCBCR_REC_709;
+ }
+}
+
HRESULT DeckLinkOutput::ScheduledFrameCompleted(/* in */ IDeckLinkVideoFrame *completedFrame, /* in */ BMDOutputFrameCompletionResult result)
{
Frame *frame = static_cast<Frame *>(completedFrame);
#define _DECKLINK_OUTPUT_H 1
#include <epoxy/gl.h>
+#include <movit/image_format.h>
#include <stdint.h>
#include <atomic>
#include <chrono>
void start_output(uint32_t mode, int64_t base_pts); // Mode comes from get_available_video_modes().
void end_output();
- void send_frame(GLuint y_tex, GLuint cbcr_tex, const std::vector<RefCountedFrame> &input_frames, int64_t pts, int64_t duration);
+ void send_frame(GLuint y_tex, GLuint cbcr_tex, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, int64_t pts, int64_t duration);
void send_audio(int64_t pts, const std::vector<float> &samples);
// NOTE: The returned timestamp is undefined for preroll.
// If the given mode is supported, return it. If not, pick some “best” valid mode.
uint32_t pick_video_mode(uint32_t mode) const;
+ // Desired Y'CbCr coefficients for the current mode. Undefined before start_output().
+ movit::YCbCrLumaCoefficients preferred_ycbcr_coefficients() const;
+
// IUnknown.
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) override;
ULONG STDMETHODCALLTYPE AddRef() override;
std::condition_variable frame_queues_changed;
bool playback_initiated = false, playback_started = false;
int64_t base_pts, frame_duration;
+ BMDDisplayModeFlags current_mode_flags = 0;
+ bool last_frame_had_mode_mismatch = false;
movit::ResourcePool *resource_pool;
IDeckLinkOutput *output = nullptr;
fprintf(stderr, " --audio-queue-length-ms=MS length of audio resampling queue (default 100.0)\n");
fprintf(stderr, " --output-ycbcr-coefficients={rec601,rec709,auto}\n");
fprintf(stderr, " Y'CbCr coefficient standard of output (default auto)\n");
- fprintf(stderr, " auto is rec709 if and only if --output-card is used\n");
- fprintf(stderr, " and a HD resolution is set\n");
+ fprintf(stderr, " auto is rec601, unless --output-card is used\n");
+ fprintf(stderr, " and a Rec. 709 mode (typically HD modes) is in use\n");
fprintf(stderr, " --output-buffer-frames=NUM number of frames in output buffer for --output-card,\n");
fprintf(stderr, " can be fractional (default 6.0); note also\n");
fprintf(stderr, " the audio queue can't be much longer than this\n");
// On the other hand, HDMI/SDI output typically requires Rec. 709 for
// HD resolutions (with no way of signaling anything else), which is
// a conflicting demand. In this case, we typically let the HDMI/SDI
- // output win, but the user can override this.
+ // output win if it is active, but the user can override this.
if (output_ycbcr_coefficients == "auto") {
- if (global_flags.output_card >= 0 && global_flags.width >= 1280) {
- global_flags.ycbcr_rec709_coefficients = true;
- } else {
- global_flags.ycbcr_rec709_coefficients = false;
- }
+ // Essentially: Follow the HDMI/SDI output's preferred coefficients
+ // if that output is active (Rec. 709 for HD modes), otherwise BT.601.
+ global_flags.ycbcr_rec709_coefficients = false;
+ global_flags.ycbcr_auto_coefficients = true;
} else if (output_ycbcr_coefficients == "rec709") {
global_flags.ycbcr_rec709_coefficients = true;
+ global_flags.ycbcr_auto_coefficients = false;
} else if (output_ycbcr_coefficients == "rec601") {
global_flags.ycbcr_rec709_coefficients = false;
+ global_flags.ycbcr_auto_coefficients = false;
} else {
fprintf(stderr, "ERROR: --output-ycbcr-coefficients must be “rec601”, “rec709” or “auto”\n");
exit(1);
std::string midi_mapping_filename; // Empty for none.
bool print_video_latency = false;
double audio_queue_length_ms = 100.0;
- bool ycbcr_rec709_coefficients = false;
+ bool ycbcr_rec709_coefficients = false; // Will be overridden by HDMI/SDI output if ycbcr_auto_coefficients == true.
+ bool ycbcr_auto_coefficients = true;
int output_card = -1;
double output_buffer_frames = 6.0;
double output_slop_frames = 0.5;
theme_main_chain.setup_chain();
//theme_main_chain.chain->enable_phase_timing(true);
+ // If HDMI/SDI output is active and the user has requested auto mode,
+ // its mode overrides the existing Y'CbCr setting for the chain.
+ YCbCrLumaCoefficients ycbcr_output_coefficients;
+ if (global_flags.ycbcr_auto_coefficients && output_card_index != -1) {
+ ycbcr_output_coefficients = cards[output_card_index].output->preferred_ycbcr_coefficients();
+ } else {
+ ycbcr_output_coefficients = global_flags.ycbcr_rec709_coefficients ? YCBCR_REC_709 : YCBCR_REC_601;
+ }
+
+ // TODO: Reduce the duplication against theme.cpp.
+ YCbCrFormat output_ycbcr_format;
+ output_ycbcr_format.chroma_subsampling_x = 1;
+ output_ycbcr_format.chroma_subsampling_y = 1;
+ output_ycbcr_format.luma_coefficients = ycbcr_output_coefficients;
+ output_ycbcr_format.full_range = false;
+ output_ycbcr_format.num_levels = 256;
+ chain->change_ycbcr_output_format(output_ycbcr_format);
+
const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE); // Corresponds to the delay in ResamplingQueue.
GLuint y_tex, cbcr_tex;
- bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
+ bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, ycbcr_output_coefficients, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
assert(got_frame);
// Render main chain. We take an extra copy of the created outputs,
GLuint cbcr_copy_tex = resource_pool->create_2d_texture(GL_RG8, global_flags.width / 2, global_flags.height / 2);
chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex, cbcr_copy_tex);
if (output_card_index != -1) {
- cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, theme_main_chain.input_frames, pts_int, duration);
+ cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, ycbcr_output_coefficients, theme_main_chain.input_frames, pts_int, duration);
}
resource_pool->release_2d_texture(cbcr_full_tex);
}
#include "defs.h"
+#include "flags.h"
#include "timebase.h"
using namespace std;
// as noted in each comment.
// Note that the H.264 stream also contains this information and depending on the
// mux, this might simply get ignored. See sps_rbsp().
+ // Note that this cannot be changed per-frame here, unlike in the
+ // H.264 stream itself, where the SPS can signal it on each IDR frame.
avstream_video->codecpar->color_primaries = AVCOL_PRI_BT709; // RGB colorspace (inout_format.color_space).
avstream_video->codecpar->color_trc = AVCOL_TRC_UNSPECIFIED; // Gamma curve (inout_format.gamma_curve).
- avstream_video->codecpar->color_space = AVCOL_SPC_SMPTE170M; // YUV colorspace (output_ycbcr_format.luma_coefficients).
+ // YUV colorspace (output_ycbcr_format.luma_coefficients).
+ if (global_flags.ycbcr_rec709_coefficients) {
+ avstream_video->codecpar->color_space = AVCOL_SPC_BT709;
+ } else {
+ avstream_video->codecpar->color_space = AVCOL_SPC_SMPTE170M;
+ }
avstream_video->codecpar->color_range = AVCOL_RANGE_MPEG; // Full vs. limited range (output_ycbcr_format.full_range).
avstream_video->codecpar->chroma_location = AVCHROMA_LOC_LEFT; // Chroma sample location. See chroma_offset_0[] in Mixer::subsample_chroma().
avstream_video->codecpar->field_order = AV_FIELD_PROGRESSIVE;
#include "quicksync_encoder.h"
+#include <movit/image_format.h>
#include <movit/resource_pool.h> // Must be above the Xlib includes.
#include <movit/util.h>
#include "timebase.h"
#include "x264_encoder.h"
+using namespace movit;
using namespace std;
using namespace std::chrono;
using namespace std::placeholders;
bitstream_put_ui(bs, nal_unit_type, 5);
}
-void QuickSyncEncoderImpl::sps_rbsp(bitstream *bs)
+void QuickSyncEncoderImpl::sps_rbsp(YCbCrLumaCoefficients ycbcr_coefficients, bitstream *bs)
{
int profile_idc = PROFILE_IDC_BASELINE;
{
bitstream_put_ui(bs, 1, 8); /* colour_primaries (1 = BT.709) */
bitstream_put_ui(bs, 2, 8); /* transfer_characteristics (2 = unspecified, since we use sRGB) */
- if (global_flags.ycbcr_rec709_coefficients) {
+ if (ycbcr_coefficients == YCBCR_REC_709) {
bitstream_put_ui(bs, 1, 8); /* matrix_coefficients (1 = BT.709) */
} else {
+ assert(ycbcr_coefficients == YCBCR_REC_601);
bitstream_put_ui(bs, 6, 8); /* matrix_coefficients (6 = BT.601/SMPTE 170M) */
}
}
}
int
-QuickSyncEncoderImpl::build_packed_seq_buffer(unsigned char **header_buffer)
+QuickSyncEncoderImpl::build_packed_seq_buffer(YCbCrLumaCoefficients ycbcr_coefficients, unsigned char **header_buffer)
{
bitstream bs;
bitstream_start(&bs);
nal_start_code_prefix(&bs);
nal_header(&bs, NAL_REF_IDC_HIGH, NAL_SPS);
- sps_rbsp(&bs);
+ sps_rbsp(ycbcr_coefficients, &bs);
bitstream_end(&bs);
*header_buffer = (unsigned char *)bs.buffer;
return 0;
}
-int QuickSyncEncoderImpl::render_packedsequence()
+int QuickSyncEncoderImpl::render_packedsequence(YCbCrLumaCoefficients ycbcr_coefficients)
{
VAEncPackedHeaderParameterBuffer packedheader_param_buffer;
VABufferID packedseq_para_bufid, packedseq_data_bufid, render_id[2];
unsigned char *packedseq_buffer = NULL;
VAStatus va_status;
- length_in_bits = build_packed_seq_buffer(&packedseq_buffer);
+ length_in_bits = build_packed_seq_buffer(ycbcr_coefficients, &packedseq_buffer);
packedheader_param_buffer.type = VAEncPackedHeaderSequence;
return 0;
}
-QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
: current_storage_frame(0), resource_pool(resource_pool), surface(surface), x264_encoder(x264_encoder), frame_width(width), frame_height(height), disk_space_estimator(disk_space_estimator)
{
file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, oformat));
}
}
-bool QuickSyncEncoderImpl::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoderImpl::begin_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
assert(!is_shutdown);
GLSurface *surf = nullptr;
glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
}
- current_video_frame = PendingFrame{ {}, input_frames, pts, duration };
+ current_video_frame = PendingFrame{ {}, input_frames, pts, duration, ycbcr_coefficients };
return true;
}
}
last_dts = dts;
- encode_frame(frame, quicksync_encoding_frame_num, quicksync_display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts, frame.duration);
+ encode_frame(frame, quicksync_encoding_frame_num, quicksync_display_frame_num, gop_start_display_frame_num, frame_type, frame.pts, dts, frame.duration, frame.ycbcr_coefficients);
++quicksync_encoding_frame_num;
}
}
PendingFrame frame = move(pending_frame.second);
int64_t dts = last_dts + (TIMEBASE / MAX_FPS);
printf("Finalizing encode: Encoding leftover frame %d as P-frame instead of B-frame.\n", display_frame_num);
- encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts, frame.duration);
+ encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts, frame.duration, frame.ycbcr_coefficients);
last_dts = dts;
}
}
if (global_flags.uncompressed_video_to_http) {
add_packet_for_uncompressed_frame(pts, duration, data);
} else if (global_flags.x264_video_to_http) {
- x264_encoder->add_frame(pts, duration, data, received_ts);
+ x264_encoder->add_frame(pts, duration, frame.ycbcr_coefficients, data, received_ts);
}
}
void QuickSyncEncoderImpl::encode_frame(QuickSyncEncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
- int frame_type, int64_t pts, int64_t dts, int64_t duration)
+ int frame_type, int64_t pts, int64_t dts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients)
{
const ReceivedTimestamps received_ts = find_received_timestamp(frame.input_frames);
// FIXME: If the mux wants global headers, we should not put the
// SPS/PPS before each IDR frame, but rather put it into the
// codec extradata (formatted differently?).
+ //
+ // NOTE: If we change ycbcr_coefficients, it will not take effect
+ // before the next IDR frame. This is acceptable, as it should only
+ // happen on a mode change, which is rare.
render_sequence();
render_picture(surf, frame_type, display_frame_num, gop_start_display_frame_num);
if (h264_packedheader) {
- render_packedsequence();
+ render_packedsequence(ycbcr_coefficients);
render_packedpicture();
}
} else {
tmp.pts = pts;
tmp.dts = dts;
tmp.duration = duration;
+ tmp.ycbcr_coefficients = ycbcr_coefficients;
tmp.received_ts = received_ts;
tmp.ref_display_frame_numbers = move(ref_display_frame_numbers);
storage_task_enqueue(move(tmp));
}
// Proxy object.
-QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
: impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder, disk_space_estimator)) {}
// Must be defined here because unique_ptr<> destructor needs to know the impl.
impl->add_audio(pts, audio);
}
-bool QuickSyncEncoder::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoder::begin_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
- return impl->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
+ return impl->begin_frame(pts, duration, ycbcr_coefficients, input_frames, y_tex, cbcr_tex);
}
RefCountedGLsync QuickSyncEncoder::end_frame()
#define _H264ENCODE_H
#include <epoxy/gl.h>
+#include <movit/image_format.h>
#include <stdbool.h>
#include <stdint.h>
#include <memory>
void set_stream_mux(Mux *mux); // Does not take ownership. Must be called unless x264 is used for the stream.
void add_audio(int64_t pts, std::vector<float> audio);
- bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+ bool begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
RefCountedGLsync end_frame();
void shutdown(); // Blocking. Does not require an OpenGL context.
void release_gl_resources(); // Requires an OpenGL context. Must be run after shutdown.
#define _QUICKSYNC_ENCODER_IMPL_H 1
#include <epoxy/egl.h>
+#include <movit/image_format.h>
#include <va/va.h>
#include <condition_variable>
QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator);
~QuickSyncEncoderImpl();
void add_audio(int64_t pts, std::vector<float> audio);
- bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+ bool begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
RefCountedGLsync end_frame();
void shutdown();
void release_gl_resources();
int frame_type;
std::vector<float> audio;
int64_t pts, dts, duration;
+ movit::YCbCrLumaCoefficients ycbcr_coefficients;
ReceivedTimestamps received_ts;
std::vector<size_t> ref_display_frame_numbers;
};
RefCountedGLsync fence;
std::vector<RefCountedFrame> input_frames;
int64_t pts, duration;
+ movit::YCbCrLumaCoefficients ycbcr_coefficients;
};
struct GLSurface {
VASurfaceID src_surface, ref_surface;
void add_packet_for_uncompressed_frame(int64_t pts, int64_t duration, const uint8_t *data);
void pass_frame(PendingFrame frame, int display_frame_num, int64_t pts, int64_t duration);
void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
- int frame_type, int64_t pts, int64_t dts, int64_t duration);
+ int frame_type, int64_t pts, int64_t dts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients);
void storage_task_thread();
void storage_task_enqueue(storage_task task);
void save_codeddata(GLSurface *surf, storage_task task);
- int render_packedsequence();
+ int render_packedsequence(movit::YCbCrLumaCoefficients ycbcr_coefficients);
int render_packedpicture();
void render_packedslice();
int render_sequence();
int render_picture(GLSurface *surf, int frame_type, int display_frame_num, int gop_start_display_frame_num);
- void sps_rbsp(bitstream *bs);
+ void sps_rbsp(movit::YCbCrLumaCoefficients ycbcr_coefficients, bitstream *bs);
void pps_rbsp(bitstream *bs);
int build_packed_pic_buffer(unsigned char **header_buffer);
int render_slice(int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num, int frame_type);
void slice_header(bitstream *bs);
- int build_packed_seq_buffer(unsigned char **header_buffer);
+ int build_packed_seq_buffer(movit::YCbCrLumaCoefficients ycbcr_coefficients, unsigned char **header_buffer);
int build_packed_slice_buffer(unsigned char **header_buffer);
int init_va(const std::string &va_display);
int deinit_va();
// happens in a pass not run by Movit (see ChromaSubsampler::subsample_chroma()).
output_ycbcr_format.chroma_subsampling_x = 1;
output_ycbcr_format.chroma_subsampling_y = 1;
+
+ // This will be overridden if HDMI/SDI output is in force.
if (global_flags.ycbcr_rec709_coefficients) {
output_ycbcr_format.luma_coefficients = YCBCR_REC_709;
} else {
output_ycbcr_format.luma_coefficients = YCBCR_REC_601;
}
+
output_ycbcr_format.full_range = false;
output_ycbcr_format.num_levels = 256;
stream_audio_encoder->encode_audio(audio, pts + quicksync_encoder->global_delay());
}
-bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
+bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
lock_guard<mutex> lock(qs_mu);
qs_needing_cleanup.clear(); // Since we have an OpenGL context here, and are called regularly.
- return quicksync_encoder->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
+ return quicksync_encoder->begin_frame(pts, duration, ycbcr_coefficients, input_frames, y_tex, cbcr_tex);
}
RefCountedGLsync VideoEncoder::end_frame()
#define _VIDEO_ENCODER_H
#include <epoxy/gl.h>
+#include <movit/image_format.h>
#include <stdbool.h>
#include <stdint.h>
#include <atomic>
// Allocate a frame to render into. The returned two textures
// are yours to render into (build them into an FBO).
// Call end_frame() when you're done.
- bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+ bool begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
// Call after you are done rendering into the frame; at this point,
// y_tex and cbcr_tex will be assumed done, and handed over to the
#include <libavformat/avformat.h>
}
+using namespace movit;
using namespace std;
using namespace std::chrono;
encoder_thread.join();
}
-void X264Encoder::add_frame(int64_t pts, int64_t duration, const uint8_t *data, const ReceivedTimestamps &received_ts)
+void X264Encoder::add_frame(int64_t pts, int64_t duration, YCbCrLumaCoefficients ycbcr_coefficients, const uint8_t *data, const ReceivedTimestamps &received_ts)
{
QueuedFrame qf;
qf.pts = pts;
qf.duration = duration;
+ qf.ycbcr_coefficients = ycbcr_coefficients;
qf.received_ts = received_ts;
{
// See if we have a new bitrate to change to.
unsigned new_rate = new_bitrate_kbit.exchange(0); // Read and clear.
if (new_rate != 0) {
- if (speed_control) {
- speed_control->set_config_override_function([new_rate](x264_param_t *param) {
- param->rc.i_bitrate = new_rate;
- update_vbv_settings(param);
- });
+ bitrate_override_func = [new_rate](x264_param_t *param) {
+ param->rc.i_bitrate = new_rate;
+ update_vbv_settings(param);
+ };
+ }
+
+ auto ycbcr_coefficients_override_func = [qf](x264_param_t *param) {
+ if (qf.ycbcr_coefficients == YCBCR_REC_709) {
+ param->vui.i_colmatrix = 1; // BT.709.
} else {
- x264_param_t param;
- x264_encoder_parameters(x264, ¶m);
- param.rc.i_bitrate = new_rate;
- update_vbv_settings(¶m);
- x264_encoder_reconfig(x264, ¶m);
+ assert(qf.ycbcr_coefficients == YCBCR_REC_601);
+ param->vui.i_colmatrix = 6; // BT.601/SMPTE 170M.
+ }
+ };
+
+ if (speed_control) {
+ speed_control->set_config_override_function([this, ycbcr_coefficients_override_func](x264_param_t *param) {
+ if (bitrate_override_func) {
+ bitrate_override_func(param);
+ }
+ ycbcr_coefficients_override_func(param);
+ });
+ } else {
+ x264_param_t param;
+ x264_encoder_parameters(x264, ¶m);
+ if (bitrate_override_func) {
+ bitrate_override_func(¶m);
}
+ ycbcr_coefficients_override_func(¶m);
+ x264_encoder_reconfig(x264, ¶m);
}
if (speed_control) {
#include <libavformat/avformat.h>
}
+#include <movit/image_format.h>
+
#include "print_latency.h"
class Mux;
// <data> is taken to be raw NV12 data of WIDTHxHEIGHT resolution.
// Does not block.
- void add_frame(int64_t pts, int64_t duration, const uint8_t *data, const ReceivedTimestamps &received_ts);
+ void add_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const uint8_t *data, const ReceivedTimestamps &received_ts);
std::string get_global_headers() const {
while (!x264_init_done) {
private:
struct QueuedFrame {
int64_t pts, duration;
+ movit::YCbCrLumaCoefficients ycbcr_coefficients;
uint8_t *data;
ReceivedTimestamps received_ts;
};
x264_t *x264;
std::unique_ptr<X264SpeedControl> speed_control;
+ std::function<void(x264_param_t *)> bitrate_override_func;
+
std::atomic<unsigned> new_bitrate_kbit{0}; // 0 for no change.
// Protects everything below it.