Add audio output when playing at 100% speed.
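
At 100% playback speed the recorded audio can simply be passed through,
so the MJPEG mux gets a second stream (raw PCM s32le, stereo, at
OUTPUT_FREQUENCY). schedule_original_frame() and
schedule_interpolated_frame() can now read a frame's audio along with
the video, and frames that come without audio, as well as explicit
silence periods, are filled with zeroed samples of matching length.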
diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp
index 06acfd2601a43ebe4ff17af8b0f7fffee788fc25..9647836feaa4294a4653bdb8eb7b84b8af77da27 100644
--- a/futatabi/video_stream.cpp
+++ b/futatabi/video_stream.cpp
@@ -13,6 +13,7 @@ extern "C" {
 #include "player.h"
 #include "shared/context.h"
 #include "shared/httpd.h"
+#include "shared/shared_defs.h"
 #include "shared/mux.h"
 #include "util.h"
 #include "ycbcr_converter.h"
@@ -286,10 +287,19 @@ void VideoStream::start()
                avctx->flags = AVFMT_FLAG_CUSTOM_IO;
        }
 
+       AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();
+
+       audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+       audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+       audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+       audio_codecpar->channels = 2;
+       audio_codecpar->sample_rate = OUTPUT_FREQUENCY;
+
        size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
-       mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
+       mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
                          AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, Mux::WITH_SUBTITLES));
 
+       avcodec_parameters_free(&audio_codecpar);
        encode_thread = thread(&VideoStream::encode_thread_func, this);
 }
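
For reference, the audio stream set up above is raw PCM, so its byte rate follows
directly from these parameters. A minimal sizing sketch, assuming OUTPUT_FREQUENCY
is 48000 and a 60 fps output (neither value is given in this hunk):

	// Sizing sketch only; the concrete numbers are assumptions, not part of this diff.
	constexpr int kSampleRate = 48000;    // assumed value of OUTPUT_FREQUENCY
	constexpr int kChannels = 2;          // stereo, as set on audio_codecpar
	constexpr int kBytesPerSample = 4;    // AV_CODEC_ID_PCM_S32LE is 32-bit

	// One second of audio: 48000 * 2 * 4 = 384000 bytes.
	constexpr int kBytesPerSecond = kSampleRate * kChannels * kBytesPerSample;

	// One 60 fps frame's worth of audio: 800 samples per channel = 6400 bytes.
	constexpr int kBytesPerVideoFrame = kBytesPerSecond / 60;
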
 
@@ -331,12 +341,10 @@ void VideoStream::clear_queue()
 void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                           int64_t output_pts, function<void()> &&display_func,
                                           QueueSpotHolder &&queue_spot_holder,
-                                          FrameOnDisk frame, const string &subtitle)
+                                          FrameOnDisk frame, const string &subtitle, bool include_audio)
 {
        fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, frame.pts);
 
-       // TODO: Write audio if at the right speed.
-
        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::ORIGINAL;
@@ -344,7 +352,9 @@ void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;
-       qf.encoded_jpeg.reset(new string(frame_reader.read_frame(frame, /*read_audio=*/false).video));
+       FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
+       qf.encoded_jpeg.reset(new string(move(read_frame.video)));
+       qf.audio = move(read_frame.audio);
 
        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
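
The include_audio flag is decided by the caller, which is not part of this diff;
a hypothetical caller-side check (names assumed, not taken from the player code)
could look like this:

	// Hypothetical sketch: pass the recorded audio through only when playing
	// at exactly 100% speed, where it lines up with the video frames.
	bool include_audio = (playback_speed == 1.0);
	schedule_original_frame(local_pts, output_pts, move(display_func),
	                        move(queue_spot_holder), frame, subtitle, include_audio);
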
@@ -424,7 +434,8 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
                                               int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                               QueueSpotHolder &&queue_spot_holder,
                                               FrameOnDisk frame1, FrameOnDisk frame2,
-                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle)
+                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
+                                              bool play_audio)
 {
        if (secondary_frame.pts != -1) {
                fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f  secondary_pts=%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
@@ -452,6 +463,10 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
        qf.local_pts = local_pts;
        qf.subtitle = subtitle;
 
+       if (play_audio) {
+               qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
+       }
+
        check_error();
 
        // Convert frame0 and frame1 to OpenGL textures.
@@ -563,6 +578,20 @@ void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
        queue_changed.notify_all();
 }
 
+void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
+                                   int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
+{
+       QueuedFrame qf;
+       qf.type = QueuedFrame::SILENCE;
+       qf.output_pts = output_pts;
+       qf.queue_spot_holder = move(queue_spot_holder);
+       qf.silence_length_pts = length_pts;
+
+       lock_guard<mutex> lock(queue_lock);
+       frame_queue.push_back(move(qf));
+       queue_changed.notify_all();
+}
+
 namespace {
 
 shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
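
schedule_silence() has no caller in this excerpt. A plausible use, sketched with the
same frame-length arithmetic that add_audio_or_silence() applies further down, would
be to keep the audio track going while no real frame is scheduled:

	// Hypothetical call site (not from this diff): queue one video frame's worth
	// of silence so the audio stream does not simply stop.
	int64_t length_pts = lrint(double(TIMEBASE) / global_flags.output_framerate);
	schedule_silence(steady_clock::now(), output_pts, length_pts, move(queue_spot_holder));
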
@@ -662,6 +691,8 @@ void VideoStream::encode_thread_func()
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
+
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::FADED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
@@ -678,6 +709,8 @@ void VideoStream::encode_thread_func()
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
+
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
@@ -705,6 +738,8 @@ void VideoStream::encode_thread_func()
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
+
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::REFRESH) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
@@ -713,6 +748,10 @@ void VideoStream::encode_thread_func()
                        pkt.size = last_frame.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+                       add_audio_or_silence(qf);  // Definitely silence.
+               } else if (qf.type == QueuedFrame::SILENCE) {
+                       add_silence(qf.output_pts, qf.silence_length_pts);
                } else {
                        assert(false);
                }
@@ -746,3 +785,38 @@ int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType ty
        }
        return buf_size;
 }
+
+void VideoStream::add_silence(int64_t pts, int64_t length_pts)
+{
+       // At 59.94, this will never quite add up (even discounting refresh frames,
+       // which have unpredictable length), but hopefully, the player in the other
+       // end should be able to stretch silence easily enough.
+       long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
+       uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
+
+       AVPacket pkt;
+       av_init_packet(&pkt);
+       pkt.stream_index = 1;
+       pkt.data = zero;
+       pkt.size = num_samples * sizeof(int32_t);
+       pkt.flags = AV_PKT_FLAG_KEY;
+       mux->add_packet(pkt, pts, pts);
+
+       free(zero);
+}
+
+void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
+{
+       if (qf.audio.empty()) {
+               int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
+               add_silence(qf.output_pts, frame_length);
+       } else {
+               AVPacket pkt;
+               av_init_packet(&pkt);
+               pkt.stream_index = 1;
+               pkt.data = (uint8_t *)qf.audio.data();
+               pkt.size = qf.audio.size();
+               pkt.flags = AV_PKT_FLAG_KEY;
+               mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+       }
+}
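
To make the rounding comment in add_silence() concrete, assuming OUTPUT_FREQUENCY is
48000 (the value is not shown in this diff): at 59.94 fps one frame spans
48000 / 59.94 ≈ 800.8 samples per channel. lrint() rounds this to 801, so each
frame-sized chunk of silence carries 1602 samples (6408 bytes of s32le stereo) and
overshoots by about 0.2 samples per channel per frame, which is the small drift the
receiving player is expected to stretch away.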