In the MJPEG export, include white balance information.
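The hunks below show only this file's side of the change. As a rough illustration of what per-frame "white balance information" in an MJPEG export could look like, here is a minimal sketch; all struct, field, and type names are hypothetical and are not taken from Nageru's code.

```cpp
// Hypothetical sketch only (not Nageru's actual API): carry the white-balance
// estimate as linear RGB gains next to each encoded MJPEG frame, so whatever
// consumes the export can reapply the same neutral point.
#include <cstdint>
#include <vector>

struct WhiteBalance {
	float r = 1.0f, g = 1.0f, b = 1.0f;  // gains that bring the scene's gray to neutral
};

struct ExportedMJPEGFrame {
	std::vector<uint8_t> jpeg_data;  // the encoded JPEG bytes
	WhiteBalance white_balance;      // per-frame white balance metadata
};
```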
diff --git a/nageru/ffmpeg_capture.cpp b/nageru/ffmpeg_capture.cpp
index b4fec063ce702a01dde9d2f7655a553fc471336c..b9eb982008f166fbc7cab97b0e3acc990ee19340 100644
--- a/nageru/ffmpeg_capture.cpp
+++ b/nageru/ffmpeg_capture.cpp
@@ -191,7 +191,7 @@ YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *f
                format.cb_y_position = 1.0;
                break;
        default:
-               fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
+               fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
                        frame->chroma_location);
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.5;
@@ -793,12 +793,12 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::
 
                if (resampler == nullptr) {
                        fprintf(stderr, "Allocating resampler failed.\n");
-                       exit(1);
+                       abort();
                }
 
                if (swr_init(resampler) < 0) {
                        fprintf(stderr, "Could not open resample context.\n");
-                       exit(1);
+                       abort();
                }
 
                last_src_format = AVSampleFormat(audio_avframe->format);
@@ -815,7 +815,7 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::
                const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
        if (out_samples < 0) {
                 fprintf(stderr, "Audio conversion failed.\n");
-                exit(1);
+                abort();
         }
 
        audio_frame->len += out_samples * bytes_per_sample;
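The last two hunks change fatal error handling in FFmpegCapture::convert_audio() from exit(1) to abort(), which typically leaves a core dump behind instead of terminating cleanly. Below is a self-contained sketch of the same allocate/init/fail-hard pattern around libswresample, independent of the surrounding Nageru code; the concrete channel layouts, sample formats, and rates are example values only, not what ffmpeg_capture.cpp uses.

```cpp
// Sketch of the pattern touched by the hunks above, using real libswresample
// calls but example parameters (stereo 44.1 kHz s16 in, stereo 48 kHz float out).
// Failure is treated as fatal with abort(), as in the patched code.
extern "C" {
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}
#include <cstdio>
#include <cstdlib>

SwrContext *create_resampler()
{
	SwrContext *resampler = swr_alloc_set_opts(
		nullptr,
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLT, 48000,   // output layout/format/rate
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,   // input layout/format/rate
		/*log_offset=*/0, /*log_ctx=*/nullptr);
	if (resampler == nullptr) {
		fprintf(stderr, "Allocating resampler failed.\n");
		abort();
	}
	if (swr_init(resampler) < 0) {
		fprintf(stderr, "Could not open resample context.\n");
		abort();
	}
	return resampler;
}
```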