This fixes a problem where the ResourcePool would not be aware
that a given texture had been deleted (and a new one had come back
using the same texture number), and thus would happily continue
to give out cached FBOs from it, even though OpenGL had unlinked
those FBOs when the texture was deleted.
//#include "sysdeps.h"
#include "quicksync_encoder.h"
//#include "sysdeps.h"
#include "quicksync_encoder.h"
+#include <movit/resource_pool.h>
#include <movit/util.h>
#include <EGL/eglplatform.h>
#include <X11/X.h>
#include <movit/util.h>
#include <EGL/eglplatform.h>
#include <X11/X.h>
class QuickSyncEncoderImpl {
public:
class QuickSyncEncoderImpl {
public:
- QuickSyncEncoderImpl(const std::string &filename, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder);
+ QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder);
~QuickSyncEncoderImpl();
void add_audio(int64_t pts, vector<float> audio);
bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
~QuickSyncEncoderImpl();
void add_audio(int64_t pts, vector<float> audio);
bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
map<int, PendingFrame> pending_video_frames; // under frame_queue_mutex
map<int64_t, vector<float>> pending_audio_frames; // under frame_queue_mutex
map<int, PendingFrame> pending_video_frames; // under frame_queue_mutex
map<int64_t, vector<float>> pending_audio_frames; // under frame_queue_mutex
+ movit::ResourcePool *resource_pool;
QSurface *surface;
unique_ptr<AudioEncoder> file_audio_encoder;
QSurface *surface;
unique_ptr<AudioEncoder> file_audio_encoder;
//glGenFramebuffers(SURFACE_NUM, fbos);
for (i = 0; i < SURFACE_NUM; i++) {
//glGenFramebuffers(SURFACE_NUM, fbos);
for (i = 0; i < SURFACE_NUM; i++) {
- glGenTextures(1, &gl_surfaces[i].y_tex);
- glGenTextures(1, &gl_surfaces[i].cbcr_tex);
-
- if (!use_zerocopy) {
- // Create Y image.
- glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].y_tex);
- glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, frame_width, frame_height);
-
- // Create CbCr image.
- glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].cbcr_tex);
- glTexStorage2D(GL_TEXTURE_2D, 1, GL_RG8, frame_width / 2, frame_height / 2);
+ if (use_zerocopy) {
+ gl_surfaces[i].y_tex = resource_pool->create_2d_texture(GL_R8, 1, 1);
+ gl_surfaces[i].cbcr_tex = resource_pool->create_2d_texture(GL_RG8, 1, 1);
+ } else {
+ gl_surfaces[i].y_tex = resource_pool->create_2d_texture(GL_R8, frame_width, frame_height);
+ gl_surfaces[i].cbcr_tex = resource_pool->create_2d_texture(GL_RG8, frame_width / 2, frame_height / 2);
// Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
// buffers, due to potentially differing pitch.
// Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
// buffers, due to potentially differing pitch.
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
glDeleteBuffers(1, &gl_surfaces[i].pbo);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
glDeleteBuffers(1, &gl_surfaces[i].pbo);
}
- glDeleteTextures(1, &gl_surfaces[i].y_tex);
- glDeleteTextures(1, &gl_surfaces[i].cbcr_tex);
+ resource_pool->release_2d_texture(gl_surfaces[i].y_tex);
+ resource_pool->release_2d_texture(gl_surfaces[i].cbcr_tex);
}
vaDestroyContext(va_dpy, context_id);
}
vaDestroyContext(va_dpy, context_id);
-QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
- : current_storage_frame(0), surface(surface), stream_audio_encoder(stream_audio_encoder), x264_encoder(x264_encoder), stream_mux(stream_mux), frame_width(width), frame_height(height)
+QuickSyncEncoderImpl::QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
+ : current_storage_frame(0), resource_pool(resource_pool), surface(surface), stream_audio_encoder(stream_audio_encoder), x264_encoder(x264_encoder), stream_mux(stream_mux), frame_width(width), frame_height(height)
{
file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
open_output_file(filename);
{
file_audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE));
open_output_file(filename);
-QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
- : impl(new QuickSyncEncoderImpl(filename, surface, va_display, width, height, stream_mux, stream_audio_encoder, x264_encoder)) {}
+QuickSyncEncoder::QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder)
+ : impl(new QuickSyncEncoderImpl(filename, resource_pool, surface, va_display, width, height, stream_mux, stream_audio_encoder, x264_encoder)) {}
// Must be defined here because unique_ptr<> destructor needs to know the impl.
QuickSyncEncoder::~QuickSyncEncoder() {}
// Must be defined here because unique_ptr<> destructor needs to know the impl.
QuickSyncEncoder::~QuickSyncEncoder() {}
class QSurface;
class X264Encoder;
class QSurface;
class X264Encoder;
+namespace movit {
+class ResourcePool;
+} // namespace movit
+
// This is just a pimpl, because including anything X11-related in a .h file
// tends to trip up Qt. All the real logic is in QuickSyncEncoderImpl, defined in the
// .cpp file.
class QuickSyncEncoder {
public:
// This is just a pimpl, because including anything X11-related in a .h file
// tends to trip up Qt. All the real logic is in QuickSyncEncoderImpl, defined in the
// .cpp file.
class QuickSyncEncoder {
public:
- QuickSyncEncoder(const std::string &filename, QSurface *surface, const std::string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder);
+ QuickSyncEncoder(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, Mux *stream_mux, AudioEncoder *stream_audio_encoder, X264Encoder *x264_encoder);
~QuickSyncEncoder();
void add_audio(int64_t pts, std::vector<float> audio);
~QuickSyncEncoder();
void add_audio(int64_t pts, std::vector<float> audio);
#include "x264_encoder.h"
using namespace std;
#include "x264_encoder.h"
using namespace std;
-VideoEncoder::VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd)
- : surface(surface), va_display(va_display), width(width), height(height), httpd(httpd)
+VideoEncoder::VideoEncoder(ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd)
+ : resource_pool(resource_pool), surface(surface), va_display(va_display), width(width), height(height), httpd(httpd)
}
string filename = generate_local_dump_filename(/*frame=*/0);
}
string filename = generate_local_dump_filename(/*frame=*/0);
- quicksync_encoder.reset(new QuickSyncEncoder(filename, surface, va_display, width, height, stream_mux.get(), stream_audio_encoder.get(), x264_encoder.get()));
+ quicksync_encoder.reset(new QuickSyncEncoder(filename, resource_pool, surface, va_display, width, height, stream_mux.get(), stream_audio_encoder.get(), x264_encoder.get()));
}
VideoEncoder::~VideoEncoder()
}
VideoEncoder::~VideoEncoder()
string filename = generate_local_dump_filename(frame);
printf("Starting new recording: %s\n", filename.c_str());
quicksync_encoder->shutdown();
string filename = generate_local_dump_filename(frame);
printf("Starting new recording: %s\n", filename.c_str());
quicksync_encoder->shutdown();
- quicksync_encoder.reset(new QuickSyncEncoder(filename, surface, va_display, width, height, stream_mux.get(), stream_audio_encoder.get(), x264_encoder.get()));
+ quicksync_encoder.reset(new QuickSyncEncoder(filename, resource_pool, surface, va_display, width, height, stream_mux.get(), stream_audio_encoder.get(), x264_encoder.get()));
}
void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
}
void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
class QuickSyncEncoder;
class X264Encoder;
class QuickSyncEncoder;
class X264Encoder;
+namespace movit {
+class ResourcePool;
+} // namespace movit
+
class VideoEncoder : public KeyFrameSignalReceiver {
public:
class VideoEncoder : public KeyFrameSignalReceiver {
public:
- VideoEncoder(QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
+ VideoEncoder(movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, HTTPD *httpd);
~VideoEncoder();
void add_audio(int64_t pts, std::vector<float> audio);
~VideoEncoder();
void add_audio(int64_t pts, std::vector<float> audio);
int write_packet(uint8_t *buf, int buf_size);
std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
int write_packet(uint8_t *buf, int buf_size);
std::unique_ptr<QuickSyncEncoder> quicksync_encoder;
+ movit::ResourcePool *resource_pool;
QSurface *surface;
std::string va_display;
int width, height;
QSurface *surface;
std::string va_display;
int width, height;