//#include "sysdeps.h"
+#include "h264encode.h"
+
+#include <EGL/eglplatform.h>
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <assert.h>
+#include <epoxy/egl.h>
+#include <libavcodec/avcodec.h>
+#include <libavformat/avio.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/rational.h>
+#include <libdrm/drm_fourcc.h>
+#include <stdint.h>
#include <stdio.h>
-#include <string.h>
#include <stdlib.h>
-#include <getopt.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/mman.h>
-#include <fcntl.h>
-#include <assert.h>
-#include <pthread.h>
-#include <errno.h>
-#include <math.h>
+#include <string.h>
#include <va/va.h>
-#include <va/va_x11.h>
-#include <va/va_enc_h264.h>
#include <va/va_drmcommon.h>
-#include <libdrm/drm_fourcc.h>
-#include <thread>
+#include <va/va_enc_h264.h>
+#include <va/va_x11.h>
+#include <condition_variable>
#include <mutex>
#include <queue>
-#include <condition_variable>
-#include "h264encode.h"
+#include <string>
+#include <thread>
+
+#include "context.h"
+
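+// Forward declarations; the full Qt headers are not needed here.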
+class QOpenGLContext;
+class QSurface;
#define CHECK_VASTATUS(va_status, func) \
if (va_status != VA_STATUS_SUCCESS) { \
static int h264_entropy_mode = 1; /* cabac */
static char *coded_fn = NULL;
-static FILE *coded_fp = NULL;
static int frame_width = 176;
static int frame_height = 144;
coded_fn = strdup("./test.264");
}
- /* store coded data into a file */
- coded_fp = fopen(coded_fn, "w+");
- if (coded_fp == NULL) {
- printf("Open file %s failed, exit\n", coded_fn);
- exit(1);
- }
frame_width_mbaligned = (frame_width + 15) & (~15);
frame_height_mbaligned = (frame_height + 15) & (~15);
-int H264Encoder::save_codeddata(unsigned long long display_order, unsigned long long encode_order, int frame_type)
+int H264Encoder::save_codeddata(storage_task task)
{
VACodedBufferSegment *buf_list = NULL;
VAStatus va_status;
string data;
- va_status = vaMapBuffer(va_dpy, gl_surfaces[display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
+ va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
CHECK_VASTATUS(va_status, "vaMapBuffer");
while (buf_list != NULL) {
data.append(reinterpret_cast<const char *>(buf_list->buf), buf_list->size);
- if (coded_fp != nullptr)
- coded_size += fwrite(buf_list->buf, 1, buf_list->size, coded_fp);
buf_list = (VACodedBufferSegment *) buf_list->next;
frame_size += coded_size;
}
- vaUnmapBuffer(va_dpy, gl_surfaces[display_order % SURFACE_NUM].coded_buf);
-
- AVPacket pkt;
- memset(&pkt, 0, sizeof(pkt));
- pkt.buf = nullptr;
- pkt.pts = av_rescale_q(display_order, AVRational{1, frame_rate}, avstream->time_base);
- pkt.dts = av_rescale_q(encode_order, AVRational{1, frame_rate}, avstream->time_base);
- pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
- pkt.size = data.size();
- pkt.stream_index = 0;
- if (frame_type == FRAME_IDR || frame_type == FRAME_I) {
- pkt.flags = AV_PKT_FLAG_KEY;
- } else {
- pkt.flags = 0;
+ vaUnmapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf);
+
+ {
+ // Add video.
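+		// The coded H.264 data for this frame goes out as a single packet;
+		// pts/dts are rescaled from frame counts (1/frame_rate) to the stream's time base.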
+ AVPacket pkt;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.buf = nullptr;
+ pkt.pts = av_rescale_q(task.display_order + 2, AVRational{1, frame_rate}, avstream_video->time_base); // FIXME: delay
+ pkt.dts = av_rescale_q(task.encode_order + 2, AVRational{1, frame_rate}, avstream_video->time_base); // FIXME: delay
+ pkt.data = reinterpret_cast<uint8_t *>(&data[0]);
+ pkt.size = data.size();
+ pkt.stream_index = 0;
+ if (task.frame_type == FRAME_IDR || task.frame_type == FRAME_I) {
+ pkt.flags = AV_PKT_FLAG_KEY;
+ } else {
+ pkt.flags = 0;
+ }
+ //pkt.duration = 1;
+ av_interleaved_write_frame(avctx, &pkt);
}
- pkt.duration = 1;
- av_interleaved_write_frame(avctx, &pkt);
+ {
+ // Add audio.
+ AVFrame *frame = avcodec_alloc_frame();
+ frame->nb_samples = task.audio.size() / 2;
+		frame->format = AV_SAMPLE_FMT_FLTP;
+ frame->channel_layout = AV_CH_LAYOUT_STEREO;
+
+ unique_ptr<float[]> planar_samples(new float[task.audio.size()]);
+ avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_FLTP, (const uint8_t*)planar_samples.get(), task.audio.size() * sizeof(float), 0);
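+		// Deinterleave the incoming L/R float samples into planar layout
+		// (all left samples first, then all right), as AV_SAMPLE_FMT_FLTP expects.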
+ for (int i = 0; i < frame->nb_samples; ++i) {
+ planar_samples[i] = task.audio[i * 2 + 0];
+ planar_samples[i + frame->nb_samples] = task.audio[i * 2 + 1];
+ }
+
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.data = nullptr;
+ pkt.size = 0;
+ int got_output;
+ avcodec_encode_audio2(avstream_audio->codec, &pkt, frame, &got_output);
+ if (got_output) {
+ pkt.pts = av_rescale_q(task.display_order, AVRational{1, frame_rate}, avstream_audio->time_base); // FIXME
+ pkt.stream_index = 1;
+ av_interleaved_write_frame(avctx, &pkt);
+ }
+ // TODO: Delayed frames.
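+		// (Flushing them would mean calling avcodec_encode_audio2() with a NULL frame
+		// at shutdown until got_output comes back 0.)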
+ avcodec_free_frame(&frame);
+ }
+
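+	// Debug: also dump the raw interleaved float samples to a file.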
+ static FILE *audiofp = fopen("audio.raw", "wb");
+	fwrite(task.audio.data(), sizeof(float), task.audio.size(), audiofp);
#if 0
printf("\r "); /* return back to startpoint */
// this is weird. but it seems to put a new frame onto the queue
-void H264Encoder::storage_task_enqueue(unsigned long long display_order, unsigned long long encode_order, int frame_type)
+void H264Encoder::storage_task_enqueue(storage_task task)
{
std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
-
- storage_task tmp;
- tmp.display_order = display_order;
- tmp.encode_order = encode_order;
- tmp.frame_type = frame_type;
- storage_task_queue.push(tmp);
- srcsurface_status[display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
-
+	srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
+	storage_task_queue.push(move(task));
storage_task_queue_changed.notify_all();
}
std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || !storage_task_queue.empty(); });
if (storage_thread_should_quit) return;
- current = storage_task_queue.front();
+ current = move(storage_task_queue.front());
storage_task_queue.pop();
}
// waits for data, then saves it to disk.
va_status = vaSyncSurface(va_dpy, gl_surfaces[current.display_order % SURFACE_NUM].src_surface);
CHECK_VASTATUS(va_status, "vaSyncSurface");
- save_codeddata(current.display_order, current.encode_order, current.frame_type);
+ save_codeddata(move(current));
{
std::unique_lock<std::mutex> lock(storage_task_queue_mutex);
fprintf(stderr, "%s: avio_open2() failed\n", output_filename);
exit(1);
}
- AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
- avstream = avformat_new_stream(avctx, codec);
- if (avstream == nullptr) {
+ AVCodec *codec_video = avcodec_find_encoder(AV_CODEC_ID_H264);
+ avstream_video = avformat_new_stream(avctx, codec_video);
+ if (avstream_video == nullptr) {
+ fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
+ exit(1);
+ }
+ avstream_video->time_base = AVRational{1, frame_rate};
+ avstream_video->codec->width = width;
+ avstream_video->codec->height = height;
+ avstream_video->codec->time_base = AVRational{1, frame_rate};
+ avstream_video->codec->ticks_per_frame = 1; // or 2?
+
+ AVCodec *codec_audio = avcodec_find_encoder(AV_CODEC_ID_MP3);
+ avstream_audio = avformat_new_stream(avctx, codec_audio);
+ if (avstream_audio == nullptr) {
fprintf(stderr, "%s: avformat_new_stream() failed\n", output_filename);
exit(1);
}
- avstream->time_base = AVRational{1, frame_rate};
- avstream->codec->width = width;
- avstream->codec->height = height;
- avstream->codec->time_base = AVRational{1, frame_rate};
- avstream->codec->ticks_per_frame = 1; // or 2?
+ avstream_audio->time_base = AVRational{1, frame_rate};
+ avstream_audio->codec->bit_rate = 256000;
+ avstream_audio->codec->sample_rate = 48000;
+ avstream_audio->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ avstream_audio->codec->channels = 2;
+ avstream_audio->codec->channel_layout = AV_CH_LAYOUT_STEREO;
+ avstream_audio->codec->time_base = AVRational{1, frame_rate};
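+	// Note: the audio stream reuses the video frame rate as its time base for now,
+	// and audio pts are computed in those units (see the FIXME in save_codeddata()).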
+
+ /* open it */
+ if (avcodec_open2(avstream_audio->codec, codec_audio, NULL) < 0) {
+ fprintf(stderr, "Could not open codec\n");
+ exit(1);
+ }
if (avformat_write_header(avctx, NULL) < 0) {
fprintf(stderr, "%s: avformat_write_header() failed\n", output_filename);
exit(1);
}
- coded_fp = fopen("dump.h264", "wb");
- assert(coded_fp != NULL);
-
frame_width = width;
frame_height = height;
frame_width_mbaligned = (frame_width + 15) & (~15);
return true;
}
-void H264Encoder::end_frame(RefCountedGLsync fence, const std::vector<FrameAllocator::Frame> &input_frames_to_release)
+void H264Encoder::end_frame(RefCountedGLsync fence, std::vector<float> audio, const std::vector<RefCountedFrame> &input_frames)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
- pending_frames[current_storage_frame++] = PendingFrame{ fence, input_frames_to_release };
+ pending_frames[current_storage_frame++] = PendingFrame{ fence, input_frames, move(audio) };
}
frame_queue_nonempty.notify_one();
}
unique_lock<mutex> lock(frame_queue_mutex);
frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_frames.count(current_frame_display) != 0; });
if (copy_thread_should_quit) return;
- frame = pending_frames[current_frame_display];
+ frame = move(pending_frames[current_frame_display]);
pending_frames.erase(current_frame_display);
}
glClientWaitSync(frame.fence.get(), 0, 0);
// Release back any input frames we needed to render this frame.
- // (Actually, those that were needed one output frame ago.)
- for (FrameAllocator::Frame input_frame : frame.input_frames_to_release) {
- input_frame.owner->release_frame(input_frame);
- }
+ frame.input_frames.clear();
// Unmap the image.
GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
// so now the data is done encoding (well, async job kicked off)...
// we send that to the storage thread
- storage_task_enqueue(current_frame_display, current_frame_encoding, current_frame_type);
+ storage_task tmp;
+ tmp.display_order = current_frame_display;
+ tmp.encode_order = current_frame_encoding;
+ tmp.frame_type = current_frame_type;
+ tmp.audio = move(frame.audio);
+ storage_task_enqueue(move(tmp));
update_ReferenceFrames();
++current_frame_encoding;