#include <thread>
#include <vector>
-#include "bmusb.h"
+#include "bmusb/bmusb.h"
#include "context.h"
#include "h264encode.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
+#include "timebase.h"
class QOpenGLContext;
Mixer *global_mixer = nullptr;
namespace {

// Convert packed 24-bit signed PCM (little-endian byte order, as the
// capture cards deliver it) into 32-bit float samples in [-1, 1),
// optionally dropping trailing input channels.
//
// <dst>:          output buffer with room for num_samples * out_channels floats.
// <out_channels>: number of channels to keep per sample (must be <= in_channels).
// <src>:          input buffer of num_samples * in_channels 3-byte samples.
// <in_channels>:  number of interleaved channels in <src>.
// <num_samples>:  number of sample frames to convert.
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			// Assemble the three little-endian bytes into the TOP 24 bits
			// of a 32-bit word so the sign bit lands in the MSB. The low
			// byte must stay zero; the previous code also OR-ed s1 into
			// bits 0..7, adding low-level noise to every sample.
			uint32_t s1 = *src++;
			uint32_t s2 = *src++;
			uint32_t s3 = *src++;
			uint32_t s = (s1 << 8) | (s2 << 16) | (s3 << 24);
			// Reinterpret as signed and scale by 2^-32 to map the full
			// int32 range onto [-1, 1).
			dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
		}
		// Skip the input channels we are not keeping for this sample frame.
		src += 3 * (in_channels - out_channels);
	}
}

}  // namespace
+
Mixer::Mixer(const QSurfaceFormat &format)
: mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format))
[this]{
resource_pool->clean_context();
});
+ card->resampler = new Resampler(48000.0, 48000.0, 2);
card->usb->configure_card();
}
BMUSBCapture::stop_bm_thread();
for (int card_index = 0; card_index < NUM_CARDS; ++card_index) {
- cards[card_index].new_data_ready = false; // Unblock thread.
- cards[card_index].new_data_ready_changed.notify_all();
+ {
+ std::unique_lock<std::mutex> lock(bmusb_mutex);
+ cards[card_index].should_quit = true; // Unblock thread.
+ cards[card_index].new_data_ready_changed.notify_all();
+ }
cards[card_index].usb->stop_dequeue_thread();
}
}
if (video_frame.len - video_offset != 1280 * 750 * 2) {
printf("dropping frame with wrong length (%ld)\n", video_frame.len - video_offset);
- FILE *fp = fopen("frame.raw", "wb");
- fwrite(video_frame.data, video_frame.len, 1, fp);
- fclose(fp);
- //exit(1);
card->usb->get_video_frame_allocator()->release_frame(video_frame);
card->usb->get_audio_frame_allocator()->release_frame(audio_frame);
return;
}
+ if (audio_frame.len - audio_offset > 30000) {
+ printf("dropping frame with implausible audio length (%ld)\n", audio_frame.len - audio_offset);
+ card->usb->get_video_frame_allocator()->release_frame(video_frame);
+ card->usb->get_audio_frame_allocator()->release_frame(audio_frame);
+ return;
+ }
+
{
// Wait until the previous frame was consumed.
std::unique_lock<std::mutex> lock(bmusb_mutex);
- card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready; });
+ card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
+ if (card->should_quit) return;
}
const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)video_frame.userdata;
GLuint pbo = userdata->pbo;
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
check_error();
assert(fence != nullptr);
+
+ // Convert the audio to stereo fp32 and store it next to the video.
+ size_t num_samples = (audio_frame.len - audio_offset) / 8 / 3;
+ vector<float> audio;
+ audio.resize(num_samples * 2);
+ convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);
+
{
std::unique_lock<std::mutex> lock(bmusb_mutex);
card->new_data_ready = true;
card->new_frame = RefCountedFrame(video_frame);
card->new_data_ready_fence = fence;
+ card->new_frame_audio = move(audio);
card->new_data_ready_changed.notify_all();
}
card_copy[card_index].new_data_ready = card->new_data_ready;
card_copy[card_index].new_frame = card->new_frame;
card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
+ card_copy[card_index].new_frame_audio = move(card->new_frame_audio);
card->new_data_ready = false;
card->new_data_ready_changed.notify_all();
}
RefCountedGLsync fence(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
check_error();
+ // Resample the audio as needed.
+ // TODO: Allow using audio from the other card(s) as well.
+ double pts = frame / 60.0;
+ cards[0].resampler->add_input_samples(pts, card_copy[0].new_frame_audio.data(), card_copy[0].new_frame_audio.size() / 2);
+ vector<float> samples_out;
+ samples_out.resize((48000 / 60) * 2);
+ cards[0].resampler->get_output_samples(pts, &samples_out[0], 48000 / 60);
+
// Make sure the H.264 gets a reference to all the
// input frames needed, so that they are not released back
// until the rendering is done.
for (int card_index = 0; card_index < NUM_CARDS; ++card_index) {
input_frames.push_back(bmusb_current_rendering_frame[card_index]);
}
- h264_encoder->end_frame(fence, input_frames);
+ h264_encoder->end_frame(fence, frame * (TIMEBASE / 60), move(samples_out), input_frames);
// The live frame just shows the RGBA texture we just rendered.
// It owns rgba_tex now.
// chain->print_phase_timing();
}
+#if 0
// Reset every 100 frames, so that local variations in frame times
// (especially for the first few frames, when the shaders are
// compiled etc.) don't make it hard to measure for the entire
frame = 0;
start = now;
}
+#endif
check_error();
}