X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=mixer.h;h=07a31e959fe5a21e2b268ed7f4aad11e0ac2e90a;hb=b22d8d6b38d060ccc5dfa591712211caf9ca3968;hp=89a80dd52fabaccd8e81f1743c491f41b4d329ff;hpb=194872611b21bb070f4dfcf5beda6c36d25459ea;p=nageru

diff --git a/mixer.h b/mixer.h
index 89a80dd..07a31e9 100644
--- a/mixer.h
+++ b/mixer.h
@@ -170,13 +170,24 @@ public:
 
 	void reset_meters();
 
+	struct BufferedFrame {
+		RefCountedFrame frame;
+		unsigned field_number;
+	};
+
+	BufferedFrame get_buffered_frame(int card, int history_pos)
+	{
+		return buffered_frames[card][history_pos];
+	}
+
 private:
 	void bm_frame(unsigned card_index, uint16_t timecode,
 		FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
 		FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
 	void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
 	void thread_func();
-	void process_audio_one_frame();
+	void audio_thread_func();
+	void process_audio_one_frame(int64_t frame_pts_int, int num_samples);
 	void subsample_chroma(GLuint src_tex, GLuint dst_dst);
 	void release_display_frame(DisplayFrame *frame);
 	double pts() { return double(pts_int) / TIMEBASE; }
@@ -208,17 +219,30 @@ private:
 		bool new_data_ready = false;  // Whether new_frame contains anything.
 		bool should_quit = false;
 		RefCountedFrame new_frame;
+		int64_t new_frame_length;  // In TIMEBASE units.
+		bool new_frame_interlaced;
+		unsigned new_frame_field;  // Which field (0 or 1) of the frame to use. Always 0 for progressive.
 		GLsync new_data_ready_fence;  // Whether new_frame is ready for rendering.
 		std::condition_variable new_data_ready_changed;  // Set whenever new_data_ready is changed.
 		unsigned dropped_frames = 0;  // Before new_frame.
 
+		// Accumulated errors in number of 1/TIMEBASE samples. If OUTPUT_FREQUENCY divided by
+		// frame rate is integer, will always stay zero.
+		unsigned fractional_samples = 0;
+
 		std::mutex audio_mutex;
 		std::unique_ptr<ResamplingQueue> resampling_queue;  // Under audio_mutex.
 		int last_timecode = -1;  // Unwrapped.
+		int64_t next_local_pts = 0;  // Beginning of next frame, in TIMEBASE units.
 	};
 	CaptureCard cards[MAX_CARDS];  // protected by <what?>
 
-	RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
+	// For each card, the last three frames (or fields), with 0 being the
+	// most recent one. Note that we only need the actual history if we have
+	// interlaced output (for deinterlacing), so if we detect progressive input,
+	// we immediately clear out all history and all entries will point to the same
+	// frame.
+	BufferedFrame buffered_frames[MAX_CARDS][FRAME_HISTORY_LENGTH];
 
 	class OutputChannel {
 	public:
@@ -240,10 +264,12 @@ private:
 	OutputChannel output_channel[NUM_OUTPUTS];
 
 	std::thread mixer_thread;
-	bool should_quit = false;
+	std::thread audio_thread;
+	std::atomic<bool> should_quit{false};
 
 	audio_level_callback_t audio_level_callback = nullptr;
-	Ebu_r128_proc r128;
+	std::mutex r128_mutex;
+	Ebu_r128_proc r128;  // Under r128_mutex.
 	Resampler peak_resampler;
 	std::atomic<float> peak{0.0f};
 
@@ -265,6 +291,14 @@ private:
 	std::atomic<bool> compressor_enabled{true};
 
 	std::unique_ptr<ALSAOutput> alsa;
+
+	struct AudioTask {
+		int64_t pts_int;
+		int num_samples;
+	};
+	std::mutex audio_mutex;
+	std::condition_variable audio_task_queue_changed;
+	std::queue<AudioTask> audio_task_queue;  // Under audio_mutex.
 };
 
 extern Mixer *global_mixer;
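
A note on the new buffered_frames member: it replaces the single bmusb_current_rendering_frame per card so that the mixer keeps a short history of fields for deinterlacing. Below is a minimal sketch of how such a history could be maintained; insert_new_frame() is a hypothetical helper for illustration only (the patch itself only declares the storage and the get_buffered_frame() accessor), and the real mixer.cpp may structure this differently.

// Hypothetical helper: push one new frame (or field) into a per-card history,
// following the behavior described by the comment above buffered_frames.
void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced,
                      unsigned card_index,
                      Mixer::BufferedFrame (&buffered_frames)[MAX_CARDS][FRAME_HISTORY_LENGTH])
{
	if (!interlaced) {
		// Progressive input: no history is needed, so let every slot alias the same frame.
		for (unsigned i = 0; i < FRAME_HISTORY_LENGTH; ++i) {
			buffered_frames[card_index][i] = { frame, 0 };
		}
	} else {
		// Interlaced input: age the history by one slot and put the newest field at index 0.
		for (unsigned i = FRAME_HISTORY_LENGTH; i-- > 1; ) {
			buffered_frames[card_index][i] = buffered_frames[card_index][i - 1];
		}
		buffered_frames[card_index][0] = { frame, field_num };
	}
}

With this layout, get_buffered_frame(card, 0) always returns the most recent field and higher history_pos values walk backwards in time, which is what a deinterlacer needs.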
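
The audio_thread / AudioTask / audio_task_queue additions decouple audio processing from the video mixing loop: the mixer thread only has to queue an AudioTask and wake the audio thread, which then calls process_audio_one_frame() at its own pace. A minimal sketch of what audio_thread_func() could look like, using only the members declared in this header (the actual body lives in mixer.cpp and may differ):

void Mixer::audio_thread_func()
{
	while (!should_quit) {
		AudioTask task;
		{
			// Wait until there is work (or we are asked to quit), then take one task.
			std::unique_lock<std::mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{
				return should_quit || !audio_task_queue.empty();
			});
			if (audio_task_queue.empty()) {
				continue;  // Woken up only to quit.
			}
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}
		// Heavy lifting (resampling, EBU R128 metering, compressor, ...) happens outside the lock.
		process_audio_one_frame(task.pts_int, task.num_samples);
	}
}

The producer side would push an AudioTask under audio_mutex and call audio_task_queue_changed.notify_one(); making should_quit a std::atomic<bool> is what lets both this thread and the mixer thread check it without further locking.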
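
Similarly, fractional_samples exists because OUTPUT_FREQUENCY divided by the frame rate is generally not an integer, so one video frame rarely corresponds to a whole number of audio samples. The standard remedy is to carry the sub-sample remainder (kept here in 1/TIMEBASE units, as the comment says) from one frame to the next. A hypothetical helper showing that arithmetic, using the OUTPUT_FREQUENCY and TIMEBASE constants already referenced in this header (num_samples_for_frame() itself is not part of the patch):

// How many audio samples to emit for a video frame of the given length
// (in TIMEBASE units), carrying the rounding error over to the next call.
int num_samples_for_frame(int64_t frame_length, unsigned *fractional_samples)
{
	int64_t num_samples_times_timebase =
		int64_t(OUTPUT_FREQUENCY) * frame_length + *fractional_samples;
	int num_samples = num_samples_times_timebase / TIMEBASE;
	*fractional_samples = num_samples_times_timebase % TIMEBASE;
	return num_samples;
}

The remainders cancel out over time, so audio and video do not drift apart by a fraction of a sample per frame; and if OUTPUT_FREQUENCY divided by the frame rate is an integer, the remainder is always zero, matching the comment in the patch.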