-//#include "sysdeps.h"
#include "quicksync_encoder.h"
-#include <movit/resource_pool.h>
+#include <movit/resource_pool.h> // Must be above the Xlib includes.
#include <movit/util.h>
+
#include <EGL/eglplatform.h>
-#include <X11/X.h>
#include <X11/Xlib.h>
#include <assert.h>
#include <epoxy/egl.h>
-#include <libdrm/drm_fourcc.h>
+#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <fcntl.h>
+#include <unistd.h>
#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_drmcommon.h>
#include <va/va_x11.h>
#include <algorithm>
#include <condition_variable>
+#include <cstddef>
#include <cstdint>
+#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
+#include <stack>
#include <string>
#include <thread>
#include <utility>
+extern "C" {
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avio.h>
+#include <libavutil/error.h>
+#include <libdrm/drm_fourcc.h>
+
+} // extern "C"
+
#include "audio_encoder.h"
#include "context.h"
#include "defs.h"
+#include "disk_space_estimator.h"
+#include "ffmpeg_raii.h"
#include "flags.h"
#include "mux.h"
+#include "ref_counted_frame.h"
#include "timebase.h"
#include "x264_encoder.h"
using namespace std;
+using namespace std::placeholders;
class QOpenGLContext;
class QSurface;
class QuickSyncEncoderImpl {
public:
-	QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder);
+	// New interface: the stream mux and stream audio encoder are no longer
+	// passed in here; the stream mux is injected later via set_stream_mux().
+	// oformat is used to set up the file-side audio encoder/mux.
+	QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator);
	~QuickSyncEncoderImpl();
	void add_audio(int64_t pts, vector<float> audio);
	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames);
	void shutdown();
+	// Frees the per-surface GL objects (PBO mappings, CbCr textures).
+	// Idempotent; must only be called after shutdown().
+	void release_gl_resources();
+	// Sets where the stream (HTTP) output goes. Stored as a raw pointer;
+	// ownership presumably stays with the caller — confirm.
+	void set_stream_mux(Mux *mux)
+	{
+		stream_mux = mux;
+	}
+
+	// So we never get negative dts.
+	int64_t global_delay() const {
+		return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
+	}
private:
	struct storage_task {
		int64_t pts, duration;
	};
-	// So we never get negative dts.
-	int64_t global_delay() const {
-		return int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);
-	}
-
	void open_output_file(const std::string &filename);
	void encode_thread_func();
	void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
	void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
	                  int frame_type, int64_t pts, int64_t dts, int64_t duration);
	void storage_task_thread();
-	void encode_remaining_audio();
	void storage_task_enqueue(storage_task task);
	void save_codeddata(storage_task task);
	int render_packedsequence();
	VADisplay va_open_display(const string &va_display);
	void va_close_display(VADisplay va_dpy);
	int setup_encode();
-	int release_encode();
+	void release_encode();
	void update_ReferenceFrames(int frame_type);
	int update_RefPicList(int frame_type);
	bool is_shutdown = false;
+	bool has_released_gl_resources = false;  // Guard so release_gl_resources() only runs once.
	bool use_zerocopy;
	int drm_fd = -1;
	int current_storage_frame;
	map<int, PendingFrame> pending_video_frames; // under frame_queue_mutex
-	map<int64_t, vector<float>> pending_audio_frames; // under frame_queue_mutex
	movit::ResourcePool *resource_pool;
	QSurface *surface;
	unique_ptr<AudioEncoder> file_audio_encoder;
-	AudioEncoder *stream_audio_encoder;
	unique_ptr<FrameReorderer> reorderer;
	X264Encoder *x264_encoder; // nullptr if not using x264.
-	Mux* stream_mux; // To HTTP.
+	Mux* stream_mux = nullptr; // To HTTP.
	unique_ptr<Mux> file_mux; // To local disk.
	Display *x11_display = nullptr;
	int frame_height;
	int frame_width_mbaligned;
	int frame_height_mbaligned;
+
+	DiskSpaceEstimator *disk_space_estimator;  // Raw pointer; presumably not owned by us — confirm lifetime.
};
// Supposedly vaRenderPicture() is supposed to destroy the buffer implicitly,
stream_mux->add_packet(pkt, task.pts + global_delay(), task.dts + global_delay());
}
}
- // Encode and add all audio frames up to and including the pts of this video frame.
- for ( ;; ) {
- int64_t audio_pts;
- vector<float> audio;
- {
- unique_lock<mutex> lock(frame_queue_mutex);
- frame_queue_nonempty.wait(lock, [this]{ return storage_thread_should_quit || !pending_audio_frames.empty(); });
- if (storage_thread_should_quit && pending_audio_frames.empty()) return;
- auto it = pending_audio_frames.begin();
- if (it->first > task.pts) break;
- audio_pts = it->first;
- audio = move(it->second);
- pending_audio_frames.erase(it);
- }
-
- file_audio_encoder->encode_audio(audio, audio_pts + global_delay());
- stream_audio_encoder->encode_audio(audio, audio_pts + global_delay());
-
- if (audio_pts == task.pts) break;
- }
}
}
}
-int QuickSyncEncoderImpl::release_encode()
+// Destroys the VA-API resources: per-surface coded buffers and
+// source/reference surfaces, then the encode context and config.
+// The GL-side cleanup now lives in release_gl_resources(), so this
+// no longer needs a GL context, and the unused int return is gone.
+void QuickSyncEncoderImpl::release_encode()
{
	for (unsigned i = 0; i < SURFACE_NUM; i++) {
		vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
		vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
		vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
+	}
+	vaDestroyContext(va_dpy, context_id);
+	vaDestroyConfig(va_dpy, config_id);
+}
+
+void QuickSyncEncoderImpl::release_gl_resources()
+{
+	// Frees the GL objects tied to the VA surfaces: unmaps the PBOs and
+	// returns the CbCr textures to the ResourcePool. Only meaningful in
+	// the non-zerocopy path. Guarded so it is a no-op the second time.
+	assert(is_shutdown);
+	if (has_released_gl_resources) {
+		return;
+	}
+
+	for (unsigned i = 0; i < SURFACE_NUM; i++) {
		if (!use_zerocopy) {
			glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
			glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
			resource_pool->release_2d_texture(gl_surfaces[i].cbcr_tex);
		}
-	vaDestroyContext(va_dpy, context_id);
-	vaDestroyConfig(va_dpy, config_id);
-
-	return 0;
+	// NOTE(review): brace balance in this hunk looks off — the for loop
+	// opened above never appears to be closed before the function's final
+	// brace; confirm against the full file (a context line may be elided).
+	has_released_gl_resources = true;
}
int QuickSyncEncoderImpl::deinit_va()
return 0;
}
-namespace {
-
-} // namespace
-
-QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
- : current_storage_frame(0), resource_pool(resource_pool), surface(surface), stream_audio_encoder(stream_audio_encoder), x264_encoder(x264_encoder), stream_mux(stream_mux), frame_width(width), frame_height(height)
+QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+ : current_storage_frame(0), resource_pool(resource_pool), surface(surface), x264_encoder(x264_encoder), frame_width(width), frame_height(height), disk_space_estimator(disk_space_estimator)
{
- file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
+ file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, oformat));
open_output_file(filename);
file_audio_encoder->add_mux(file_mux.get());
QuickSyncEncoderImpl::~QuickSyncEncoderImpl()
{
	shutdown();
+	// shutdown() releases the VA-API side (release_encode()/deinit_va());
+	// the GL-side per-surface objects are freed separately here.
+	release_gl_resources();
}
bool QuickSyncEncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
void QuickSyncEncoderImpl::add_audio(int64_t pts, vector<float> audio)
{
	assert(!is_shutdown);
-	{
-		unique_lock<mutex> lock(frame_queue_mutex);
-		pending_audio_frames[pts] = move(audio);
-	}
-	frame_queue_nonempty.notify_all();
+	// Audio no longer goes through pending_audio_frames; it is handed
+	// directly to the file audio encoder, shifted by the same
+	// global_delay() that is applied to the video packets.
+	file_audio_encoder->encode_audio(audio, pts + global_delay());
}
RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames)
storage_task_queue_changed.notify_all();
}
storage_thread.join();
- encode_remaining_audio();
+
+ // Encode any leftover audio in the queues, and also any delayed frames.
+ file_audio_encoder->encode_last_audio();
release_encode();
deinit_va();
exit(1);
}
- file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, file_audio_encoder->get_codec(), TIMEBASE, DEFAULT_AUDIO_OUTPUT_BIT_RATE, nullptr));
+ string video_extradata = ""; // FIXME: See other comment about global headers.
+ AVCodecParametersWithDeleter audio_codecpar = file_audio_encoder->get_codec_parameters();
+ file_mux.reset(new Mux(avctx, frame_width, frame_height, Mux::CODEC_H264, video_extradata, audio_codecpar.get(), TIMEBASE,
+ std::bind(&DiskSpaceEstimator::report_write, disk_space_estimator, filename, _1)));
}
void QuickSyncEncoderImpl::encode_thread_func()
}
}
-void QuickSyncEncoderImpl::encode_remaining_audio()
-{
- // This really ought to be empty by now, but just to be sure...
- for (auto &pending_frame : pending_audio_frames) {
- int64_t audio_pts = pending_frame.first;
- vector<float> audio = move(pending_frame.second);
-
- file_audio_encoder->encode_audio(audio, audio_pts + global_delay());
- if (stream_audio_encoder) {
- stream_audio_encoder->encode_audio(audio, audio_pts + global_delay());
- }
- }
- pending_audio_frames.clear();
-
- // Encode any leftover audio in the queues, and also any delayed frames.
- // Note: stream_audio_encoder is not owned by us, so don't call encode_last_audio().
- file_audio_encoder->encode_last_audio();
-}
-
void QuickSyncEncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, int64_t duration, const uint8_t *data)
{
AVPacket pkt;
CHECK_VASTATUS(va_status, "vaBeginPicture");
if (frame_type == FRAME_IDR) {
+ // FIXME: If the mux wants global headers, we should not put the
+ // SPS/PPS before each IDR frame, but rather put it into the
+ // codec extradata (formatted differently?).
render_sequence();
render_picture(frame_type, display_frame_num, gop_start_display_frame_num);
if (h264_packedheader) {
}
// Proxy object.
-QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
-	: impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, stream_mux, stream_audio_encoder, x264_encoder)) {}
+// Thin pimpl forwarder; all real work happens in QuickSyncEncoderImpl.
+QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator)
+	: impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, oformat, x264_encoder, disk_space_estimator)) {}
// Must be defined here because unique_ptr<> destructor needs to know the impl.
QuickSyncEncoder::~QuickSyncEncoder() {}
{
impl->shutdown();
}
+
+// Forwarder: lets the owner supply the HTTP stream mux after construction.
+void QuickSyncEncoder::set_stream_mux(Mux *mux)
+{
+	impl->set_stream_mux(mux);
+}
+
+// Forwarder: exposes the impl's fixed timestamp delay (added so that
+// dts never goes negative) to callers outside this file.
+int64_t QuickSyncEncoder::global_delay() const {
+	return impl->global_delay();
+}