]> git.sesse.net Git - nageru/blob - ffmpeg_capture.cpp
Add support for FFmpeg inputs.
[nageru] / ffmpeg_capture.cpp
1 #include "ffmpeg_capture.h"
2
3 #include <assert.h>
4 #include <pthread.h>
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 extern "C" {
12 #include <libavcodec/avcodec.h>
13 #include <libavformat/avformat.h>
14 #include <libavutil/avutil.h>
15 #include <libavutil/error.h>
16 #include <libavutil/frame.h>
17 #include <libavutil/imgutils.h>
18 #include <libavutil/mem.h>
19 #include <libavutil/pixfmt.h>
20 #include <libswscale/swscale.h>
21 }
22
23 #include <chrono>
24 #include <cstdint>
25 #include <utility>
26 #include <vector>
27
28 #include "bmusb/bmusb.h"
29 #include "ffmpeg_raii.h"
30 #include "flags.h"
31 #include "image_input.h"
32
33 #define FRAME_SIZE (8 << 20)  // 8 MB.
34
35 using namespace std;
36 using namespace std::chrono;
37 using namespace bmusb;
38
39 FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
40         : filename(filename), width(width), height(height)
41 {
42         // Not really used for anything.
43         description = "Video: " + filename;
44 }
45
46 FFmpegCapture::~FFmpegCapture()
47 {
48         if (has_dequeue_callbacks) {
49                 dequeue_cleanup_callback();
50         }
51 }
52
53 void FFmpegCapture::configure_card()
54 {
55         if (video_frame_allocator == nullptr) {
56                 owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
57                 set_video_frame_allocator(owned_video_frame_allocator.get());
58         }
59         if (audio_frame_allocator == nullptr) {
60                 owned_audio_frame_allocator.reset(new MallocFrameAllocator(65536, NUM_QUEUED_AUDIO_FRAMES));
61                 set_audio_frame_allocator(owned_audio_frame_allocator.get());
62         }
63 }
64
65 void FFmpegCapture::start_bm_capture()
66 {
67         if (running) {
68                 return;
69         }
70         running = true;
71         producer_thread_should_quit = false;
72         producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
73 }
74
75 void FFmpegCapture::stop_dequeue_thread()
76 {
77         if (!running) {
78                 return;
79         }
80         running = false;
81         producer_thread_should_quit = true;
82         producer_thread.join();
83 }
84
85 std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
86 {
87         // Note: This will never really be shown in the UI.
88         VideoMode mode;
89
90         char buf[256];
91         snprintf(buf, sizeof(buf), "%ux%u", width, height);
92         mode.name = buf;
93         
94         mode.autodetect = false;
95         mode.width = width;
96         mode.height = height;
97         mode.frame_rate_num = 60;
98         mode.frame_rate_den = 1;
99         mode.interlaced = false;
100
101         return {{ 0, mode }};
102 }
103
104 void FFmpegCapture::producer_thread_func()
105 {
106         char thread_name[16];
107         snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
108         pthread_setname_np(pthread_self(), thread_name);
109
110         while (!producer_thread_should_quit) {
111                 string pathname = search_for_file(filename);
112                 if (filename.empty()) {
113                         fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename.c_str());
114                         sleep(1);
115                         continue;
116                 }
117                 if (!play_video(pathname)) {
118                         // Error.
119                         fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
120                         sleep(1);
121                         continue;
122                 }
123
124                 // Probably just EOF, will exit the loop above on next test.
125         }
126
127         if (has_dequeue_callbacks) {
128                 dequeue_cleanup_callback();
129                 has_dequeue_callbacks = false;
130         }
131 }
132
// Open <pathname>, decode its first video stream and feed scaled RGBA frames
// to frame_callback in (approximate) real time, looping at EOF when possible.
// Returns false on error, true on clean EOF/rewind-failure or shutdown.
// Audio in the file is ignored; an empty dummy audio frame is delivered.
bool FFmpegCapture::play_video(const string &pathname)
{
	auto format_ctx = avformat_open_input_unique(pathname.c_str(), nullptr, nullptr);
	if (format_ctx == nullptr) {
		fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
		return false;
	}

	if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
		fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
		return false;
	}

	// Pick the first video stream (remembering its time base for pts
	// conversion), and note the first audio stream, although audio is
	// not actually decoded below.
	int video_stream_index = -1, audio_stream_index = -1;
	AVRational video_timebase{ 1, 1 };
	for (unsigned i = 0; i < format_ctx->nb_streams; ++i) {
		if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
		    video_stream_index == -1) {
			video_stream_index = i;
			video_timebase = format_ctx->streams[i]->time_base;
		}
		if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
		    audio_stream_index == -1) {
			audio_stream_index = i;
		}
	}
	if (video_stream_index == -1) {
		fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
		return false;
	}

	// Set up a decoder for the chosen video stream.
	const AVCodecParameters *codecpar = format_ctx->streams[video_stream_index]->codecpar;
	AVCodecContextWithDeleter codec_ctx = avcodec_alloc_context3_unique(nullptr);
	if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
		fprintf(stderr, "%s: Cannot fill codec parameters\n", pathname.c_str());
		return false;
	}
	AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
	if (codec == nullptr) {
		fprintf(stderr, "%s: Cannot find decoder\n", pathname.c_str());
		return false;
	}
	if (avcodec_open2(codec_ctx.get(), codec, nullptr) < 0) {
		fprintf(stderr, "%s: Cannot open decoder\n", pathname.c_str());
		return false;
	}
	// Ensure avcodec_close() runs on every exit path; the context itself
	// is freed by AVCodecContextWithDeleter. (Destroyed before codec_ctx,
	// since it was declared after it.)
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> codec_ctx_cleanup(
		codec_ctx.get(), avcodec_close);

	// Wall-clock origin for pacing; frame pts are scheduled relative to this.
	steady_clock::time_point start = steady_clock::now();

	// Scaler is created lazily and recreated if the source size changes.
	unique_ptr<SwsContext, decltype(sws_freeContext)*> sws_ctx(nullptr, sws_freeContext);
	int sws_last_width = -1, sws_last_height = -1;

	// Main loop.
	while (!producer_thread_should_quit) {
		// Read packets until we have a frame or there are none left.
		int frame_finished = 0;
		AVFrameWithDeleter frame = av_frame_alloc_unique();
		bool eof = false;
		do {
			// pkt_cleanup unrefs the packet on every path out of this
			// iteration (including the `continue` below).
			AVPacket pkt;
			unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
				&pkt, av_packet_unref);
			av_init_packet(&pkt);
			pkt.data = nullptr;
			pkt.size = 0;
			if (av_read_frame(format_ctx.get(), &pkt) == 0) {
				if (pkt.stream_index != video_stream_index) {
					// Ignore audio for now.
					continue;
				}
				if (avcodec_send_packet(codec_ctx.get(), &pkt) < 0) {
					fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
					return false;
				}
			} else {
				eof = true;  // Or error, but ignore that for the time being.
			}

			// Drain one frame if the decoder has one ready; EAGAIN means
			// it needs more input, so loop and read another packet.
			// NOTE(review): at EOF, no explicit flush (send_packet with
			// nullptr) is done, so frames buffered in the decoder may be
			// dropped — confirm whether that matters for the codecs used.
			int err = avcodec_receive_frame(codec_ctx.get(), frame.get());
			if (err == 0) {
				frame_finished = true;
				break;
			} else if (err != AVERROR(EAGAIN)) {
				fprintf(stderr, "%s: Cannot receive frame from codec.\n", pathname.c_str());
				return false;
			}
		} while (!eof);

		if (!frame_finished) {
			// EOF. Loop back to the start if we can.
			if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
				return true;
			}
			// Restart the pacing clock so the looped pts map to "now".
			start = steady_clock::now();
			continue;
		}

		// (Re)create the scaler if this is the first frame or the source
		// dimensions changed mid-stream.
		if (sws_ctx == nullptr || sws_last_width != frame->width || sws_last_height != frame->height) {
			sws_ctx.reset(
				sws_getContext(frame->width, frame->height, (AVPixelFormat)frame->format,
					width, height, AV_PIX_FMT_RGBA,
					SWS_BICUBIC, nullptr, nullptr, nullptr));
			sws_last_width = frame->width;
			sws_last_height = frame->height;
		}
		if (sws_ctx == nullptr) {
			fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
			return false;
		}

		// Output is always RGBA at the configured size, so stride is 4 bytes
		// per pixel. Frame rate is derived as
		// timebase.den / (pkt_duration * timebase.num).
		// NOTE(review): if pkt_duration is 0 (some containers), this makes
		// frame_rate_den 0 — verify downstream consumers tolerate that.
		VideoFormat video_format;
		video_format.width = width;
		video_format.height = height;
		video_format.stride = width * 4;
		video_format.frame_rate_nom = video_timebase.den;
		video_format.frame_rate_den = av_frame_get_pkt_duration(frame.get()) * video_timebase.num;
		video_format.has_signal = true;
		video_format.is_connected = true;

		// Convert the frame's pts (in stream time base units) to seconds,
		// then to an absolute deadline on the steady clock.
		const duration<double> pts(frame->pts * double(video_timebase.num) / double(video_timebase.den));
		const steady_clock::time_point frame_start = start + duration_cast<steady_clock::duration>(pts);

		// Scale/convert into a freshly allocated output frame. If the
		// allocator is exhausted (data == nullptr), an empty frame is
		// passed through to signal a dropped frame.
		FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
		if (video_frame.data != nullptr) {
			uint8_t *pic_data[4] = { video_frame.data, nullptr, nullptr, nullptr };
			int linesizes[4] = { int(video_format.stride), 0, 0, 0 };
			sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
			video_frame.len = video_format.stride * height;
			video_frame.received_timestamp = frame_start;
		}

		// Dummy (empty) audio frame; audio from the file is not decoded.
		FrameAllocator::Frame audio_frame;
		AudioFormat audio_format;
		audio_format.bits_per_sample = 32;
		audio_format.num_channels = 8;

		// Pace playback to real time, then hand the frame to the client.
		this_thread::sleep_until(frame_start);
		frame_callback(timecode++,
			video_frame, 0, video_format,
			audio_frame, 0, audio_format);
	}
	return true;
}