1 // Kaeru (換える), a simple transcoder intended for use with Nageru.
3 #include "audio_encoder.h"
4 #include "basic_stats.h"
7 #include "ffmpeg_capture.h"
9 #include "shared/mux.h"
10 #include "quittable_sleeper.h"
11 #include "shared/timebase.h"
12 #include "x264_encoder.h"
22 #include <libavcodec/bsf.h>
25 using namespace bmusb;
26 using namespace movit;
28 using namespace std::chrono;
29 using namespace std::placeholders;
// --- Global state shared across the transcoder ---
31 Mixer *global_mixer = nullptr;
// Set in main(); used by the SIGUSR1/SIGUSR2 handlers to adjust bitrate.
32 X264Encoder *global_x264_encoder = nullptr;
// Set in main(); updated once per encoded video frame in video_frame_callback().
34 BasicStats *global_basic_stats = nullptr;
// Signaled by request_quit() (SIGINT) and when playback passes crop_end.
35 QuittableSleeper should_quit;
36 MuxMetrics stream_mux_metrics;
38 //unsigned frameno = 0;
// Wall-clock time (seconds) corresponding to input pts 0; from argv[optind + 1].
39 double video_start_time = 0;
40 double fps = 60.0 / 1.001;
// Scoreboard (Bodet) messages read from the message file in main(), in time order.
45 std::vector<BodetMsg> bodet_msgs;
// Overlay parameters taken from the command line (team1 == "nocef" disables CEF).
47 string team1, team2, team1color, team2color;
// Current scoreboard state; bodet_clock is in seconds.
48 int score1 = 0, score2 = 0, bodet_clock = 0;
// Output file; overridable via argv[optind + 9].
49 std::string output_filename = "out.mp4";
// AVIO write callback for streaming mux output to the embedded HTTPD:
// remembers the mux header (so late-joining clients get it), and tags
// keyframes/sync points so clients can start at a safe position.
// Currently unused — create_mux() writes straight to a file instead
// (see the commented-out avio_alloc_context() lines there).
53 int write_packet(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
// Static: once the muxer has produced any explicit sync marker, we stop
// treating UNKNOWN data as sync points.
55 static bool seen_sync_markers = false;
56 static string stream_mux_header;
57 HTTPD *httpd = (HTTPD *)opaque;
59 if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
60 seen_sync_markers = true;
61 } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
62 // We don't know if this is a keyframe or not (the muxer could
63 // avoid marking it), so we just have to make the best of it.
64 type = AVIO_DATA_MARKER_SYNC_POINT;
67 HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
68 if (type == AVIO_DATA_MARKER_HEADER) {
// Header bytes may arrive in several writes; accumulate before publishing.
69 stream_mux_header.append((char *)buf, buf_size);
70 httpd->set_header(stream_id, stream_mux_header);
// Non-header data goes to connected clients, keyframe-flagged on sync points.
72 httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
// Builds the output muxer: allocates an AVFormatContext for the chosen
// container, opens output_filename for writing (the commented-out lines are
// the old HTTP/custom-I/O path via write_packet()), and wraps everything in
// a Mux carrying the x264 global headers plus, optionally, the audio stream.
79 unique_ptr<Mux> create_mux(HTTPD *httpd, const AVOutputFormat *oformat, X264Encoder *x264_encoder, AudioEncoder *audio_encoder)
81 AVFormatContext *avctx = avformat_alloc_context();
82 avctx->oformat = const_cast<decltype(avctx->oformat)>(oformat); // const_cast is a hack to work in FFmpeg both before and after 5.0.
// NOTE(review): this buffer was for the custom-I/O path below; with that path
// commented out, it appears to be allocated and never used/freed — verify.
84 uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
85 //avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, httpd, nullptr, nullptr, nullptr);
86 //avctx->pb->write_data_type = &write_packet;
87 //avctx->pb->ignore_boundary_point = 1;
88 //avctx->flags = AVFMT_FLAG_CUSTOM_IO;
// NOTE(review): avio_open()'s return value is unchecked; failure would leave
// avctx->pb null and crash later in the mux.
89 avio_open(&avctx->pb, output_filename.c_str(), AVIO_FLAG_WRITE);
// SPS/PPS etc. from x264, passed to the mux as codec extradata.
91 string video_extradata = x264_encoder->get_global_headers();
93 // If audio is disabled (ie., we won't ever see any audio packets),
94 // set nullptr here to also not include the stream in the mux.
95 AVCodecParameters *audio_codecpar =
96 global_flags.enable_audio ? audio_encoder->get_codec_parameters().release() : nullptr;
99 mux.reset(new Mux(avctx, global_flags.width, global_flags.height, Mux::CODEC_H264, video_extradata, audio_codecpar,
100 get_color_space(global_flags.ycbcr_rec709_coefficients), COARSE_TIMEBASE,
101 /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, { &stream_mux_metrics }));
102 stream_mux_metrics.init({{ "destination", "http" }});
// Destination buffer for the NV12 conversion; allocated lazily in
// convert_stuff() as width * height * 2 bytes (luma plane + interleaved chroma).
106 uint8_t *vfd = nullptr;
// Latest CEF overlay frame, BGRA at a fixed 1280x720. Written by
// KaeruCEFClient::OnPaint(), read by add_cef(); see the FIXME about locking.
107 uint8_t cefimg[1280 * 720 * 4];
// Lazily-created BGRA -> NV12 scaler; created once in convert_stuff(), so the
// input resolution is assumed never to change after the first frame.
108 SwsContext *sws = nullptr;
111 #include <libswscale/swscale.h>
// Converts one BGRA frame (`ptr`) to NV12 into the global `vfd` buffer using
// swscale. The scaler context and `vfd` are created on first use, sized for
// the first frame's dimensions — later frames are assumed to match.
114 void convert_stuff(const VideoFormat &video_format, const uint8_t *ptr)
116 if (sws == nullptr) {
117 sws = sws_getContext(video_format.width, video_format.height, AV_PIX_FMT_BGRA,
118 video_format.width, video_format.height, AV_PIX_FMT_NV12,
119 SWS_BICUBIC, nullptr, nullptr, nullptr);
// NV12 needs 1.5 bytes/pixel; 2x allocates with headroom.
120 vfd = new uint8_t[video_format.width * video_format.height * 2];
// Source: single packed BGRA plane, 4 bytes per pixel.
123 uint8_t *src_pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
124 int src_linesizes[4] = { 0, 0, 0, 0 };
125 src_pic_data[0] = (uint8_t *)ptr;
126 src_linesizes[0] = video_format.width * 4;
// Destination: NV12 — plane 0 is luma, plane 1 is interleaved Cb/Cr,
// starting right after the luma plane in vfd.
128 uint8_t *dst_pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
129 int dst_linesizes[4] = { 0, 0, 0, 0 };
130 dst_pic_data[0] = vfd;
131 dst_linesizes[0] = video_format.width;
132 dst_pic_data[1] = vfd + video_format.width * video_format.height;
133 dst_linesizes[1] = video_format.width;
135 sws_scale(sws, src_pic_data, src_linesizes, 0, video_format.height, dst_pic_data, dst_linesizes);
139 #include <cef_browser.h>
140 #include <cef_client.h>
141 #include "nageru_cef_app.h"
// Guards `browser` and `browser_ready`; recursive since CEF callbacks can
// run while the lock is already held on the same thread.
143 recursive_mutex browser_mutex;
// Readiness state for the CEF overlay. From the visible checks: OnPaint()
// reacts when it is 1, and add_cef() only injects JavaScript once it is >= 2
// — presumably 0 = not created, with load/paint each advancing it; verify
// against the elided transitions.
144 int browser_ready = 0;
// CEF client for the windowless overlay browser: it is its own render handler
// (paints into the global cefimg) and load handler (kicks off the scoreboard
// page once loaded).
146 class KaeruCEFClient : public CefClient, public CefRenderHandler, public CefLoadHandler
151 CefRefPtr<CefRenderHandler> GetRenderHandler() override
156 CefRefPtr<CefLoadHandler> GetLoadHandler() override
// Called by CEF whenever the page repaints; copies the full BGRA frame into
// the global cefimg buffer for add_cef() to composite.
163 void OnPaint(CefRefPtr<CefBrowser> browser, PaintElementType type, const RectList &dirtyRects, const void *buffer, int width, int height) override
165 // fprintf(stderr, "onpaint %dx%d\n", width, height);
// NOTE(review): assumes width * height * 4 never exceeds sizeof(cefimg)
// (1280*720*4) — GetViewRect() pins the view to that size.
166 memcpy(cefimg, buffer, width * height * 4); // FIXME lock?
168 lock_guard<recursive_mutex> lock(browser_mutex);
169 if (browser_ready == 1)
// Tells CEF the (fixed) size of the windowless view.
173 void GetViewRect(CefRefPtr<CefBrowser> browser, CefRect &rect) override
175 fprintf(stderr, "getviewrect\n");
176 rect = CefRect(0, 0, 1280, 720);
// Once the scoreboard page has loaded, start its animation loop.
181 void OnLoadEnd(CefRefPtr<CefBrowser> browser, CefRefPtr<CefFrame> frame, int httpStatusCode) override
183 fprintf(stderr, "onload\n");
185 CefString script_url("<theme eval>");
187 browser->GetMainFrame()->ExecuteJavaScript("play();", script_url, start_line);
189 lock_guard<recursive_mutex> lock(browser_mutex);
196 IMPLEMENT_REFCOUNTING(KaeruCEFClient);
// Created in main() before CefExecuteProcess(); initialized lazily in add_cef().
199 CefRefPtr<NageruCefApp> cef_app;
// Windowless browser instance; created on the CEF UI thread in add_cef().
// Access is guarded by browser_mutex.
200 CefRefPtr<CefBrowser> browser;
// Our render/load handler; created lazily on first add_cef() call.
201 unique_ptr<KaeruCEFClient> cef_client;
// Parses a single ASCII decimal digit from a scoreboard message byte
// ('0'..'9' -> 0..9; the non-digit fallback is outside this excerpt).
203 int parse_digit(char ch)
205 if (ch >= '0' && ch <= '9') {
// Parses a two-digit clock field (e.g. minutes or seconds) from two
// scoreboard message bytes via parse_digit().
211 int parse_clock(char ch1, char ch2)
213 int s1 = parse_digit(ch1);
214 int s2 = parse_digit(ch2);
// Parses a three-character, space-padded score field from the scoreboard
// protocol: the non-space characters are collected in order (into a scratch
// buffer whose declaration/conversion is outside this excerpt) and presumably
// converted to an integer.
219 int parse_score_weird(char ch1, char ch2, char ch3)
223 if (ch1 != ' ') *ptr++ = ch1;
224 if (ch2 != ' ') *ptr++ = ch2;
225 if (ch3 != ' ') *ptr++ = ch3;
// Composites the CEF scoreboard overlay onto one BGRA video frame (`data`).
// On first call, spins up the windowless browser on the CEF UI thread. Then,
// per frame: applies any Bodet scoreboard messages whose timestamp has passed,
// pushes the updated state into the page as JavaScript, and alpha-blends the
// latest cefimg over the frame.
231 void add_cef(uint8_t *data, unsigned width, unsigned height, int64_t video_pts, AVRational video_timebase)
// Lazy one-time CEF setup.
233 if (cef_client == nullptr) {
234 cef_client.reset(new KaeruCEFClient);
236 //cef_app.reset(new NageruCefApp);
237 cef_app->initialize_cef();
// Browser creation must happen on the CEF UI thread.
239 CefPostTask(TID_UI, new CEFTaskAdapter([&]{
240 lock_guard<recursive_mutex> lock(browser_mutex);
242 CefBrowserSettings browser_settings;
243 // browser_settings.web_security = cef_state_t::STATE_DISABLED;
244 browser_settings.webgl = cef_state_t::STATE_DISABLED;
245 browser_settings.windowless_frame_rate = 60.00;
247 CefWindowInfo window_info;
248 window_info.SetAsWindowless(0);
249 browser = CefBrowserHost::CreateBrowserSync(window_info, cef_client.get(), "file:///home/sesse/dev/ultimatescore/score.html", browser_settings, nullptr, nullptr);
254 CefString script_url("<theme eval>");
// Remember the old clock so we can show/hide the on-screen clock on 0<->nonzero edges.
258 int old_bodet_clock = bodet_clock;
260 //fprintf(stderr, "video_pts=%ld timebase = %ld/%ld\n", video_pts, video_timebase.num, video_timebase.den);
261 //double cur_time = video_start_time + video_pts * double(video_timebase.num) / double(video_timebase.den);
// Absolute wall-clock time of this frame (pts is in TIMEBASE units here).
262 double cur_time = video_start_time + video_pts / double(TIMEBASE);
263 //double cur_time = video_start_time + (frameno++) / fps;
// Consume every scoreboard message that is due by this frame's time.
264 while (cur_msg < bodet_msgs.size() && cur_time > bodet_msgs[cur_msg].t) {
265 const string &m = bodet_msgs[cur_msg].msg;
// "G10" messages carry clock + both scores.
// NOTE(review): the guard checks m.size() >= 10 but m[10..13] are read below —
// looks like a possible out-of-range read for short messages; verify the
// protocol's minimum length (>= 14?).
266 if (m.size() >= 10 && m[0] == 'G' && m[1] == '1' && m[2] == '0') {
267 int min = parse_clock(m[4], m[5]);
268 int sec = parse_clock(m[6], m[7]);
269 bodet_clock = min * 60 + sec;
270 score1 = parse_score_weird(m[8], m[9], m[10]);
271 score2 = parse_score_weird(m[11], m[12], m[13]);
// Build the JavaScript call that pushes state into the scoreboard page.
276 string str = "update('{";
277 snprintf(buf, 256, "\"score1\": %d", score1);
279 snprintf(buf, 256, ",\"score2\": %d", score2);
// Time-of-day string; +7200 looks like a fixed UTC+2 offset — TODO confirm.
283 int doh = uint64_t(cur_time + 7200) % 86400;
284 snprintf(buf, 256, "%02d:%02d:%02d", doh / 3600, (doh % 3600) / 60, doh % 60);
288 str += ",\"team1\": \"" + team1 + "\"";
289 str += ",\"team2\": \"" + team2 + "\"";
290 str += ",\"team1color\": \"" + team1color + "\"";
291 str += ",\"team2color\": \"" + team2color + "\"";
292 str += "}');setteams();setcolors();setscore();";
294 snprintf(buf, 256, "update_given_clock(%d,'clock');", bodet_clock);
// Show/hide the clock only on transitions between zero and nonzero.
297 if (old_bodet_clock == 0 && bodet_clock != 0) {
298 str += "showclock();";
299 } else if (old_bodet_clock != 0 && bodet_clock == 0) {
300 str += "hideclock();";
303 //printf("%s\n", str.c_str());
// Inject the script once the browser reports ready; otherwise wait.
307 browser_mutex.lock();
308 if (browser_ready >= 2) {
309 browser->GetMainFrame()->ExecuteJavaScript(str, script_url, start_line);
310 browser_mutex.unlock();
313 browser_mutex.unlock();
314 printf("Waiting for CEF...\n");
// Alpha-blend the 1280x720 BGRA overlay (cefimg) over the frame in place.
320 unsigned char r0, g0, b0;
321 unsigned char a1, r1, g1, b1;
322 unsigned char *sptr = cefimg;
323 unsigned char *dptr = data;
324 for (int i = 0; i < 1280 * 720; ++i) {
// Standard "over" blend per channel, using the overlay's alpha.
336 unsigned r = r0 + ((r1 - r0) * a1) / 255;
337 unsigned g = g0 + ((g1 - g0) * a1) / 255;
338 unsigned b = b0 + ((b1 - b0) * a1) / 255;
346 //memcpy(data, cefimg, 1280*720*4);
// Crop window, in absolute wall-clock seconds (the same scale as
// video_start_time). Frames outside [crop_start, crop_end) are skipped.
// The defaults pass everything through.
double crop_start = 0.0;
double crop_end = HUGE_VAL;

// Returns whether the given absolute time falls inside the crop window.
bool within(double t)
{
	if (t < crop_start) {
		return false;
	}
	return t < crop_end;
}
// Splice state for playing several input files back-to-back: when the input
// pts jumps backwards (new file), these offsets are advanced so the output
// pts stays monotonic. next_*_pts track the pts expected from the next
// frame/audio block; prev_*_timebase pin the first-seen timebases so all
// files are rescaled consistently (den == 0 means "not yet seen").
359 int64_t video_pts_offset = 0, audio_pts_offset = 0;
360 int64_t next_video_pts = 0, next_audio_pts = 0;
362 AVRational prev_video_timebase{0, 0};
363 AVRational prev_audio_timebase{0, 0};
// Per-frame callback from FFmpegCapture: splices pts across input files,
// applies the crop window, composites the CEF overlay, converts to NV12 and
// feeds video to x264 and float samples to the audio encoder.
365 void video_frame_callback(FFmpegCapture *video, X264Encoder *x264_encoder, AudioEncoder *audio_encoder,
366 int64_t video_pts, AVRational video_timebase,
367 int64_t audio_pts, AVRational audio_timebase,
369 FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
370 FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
372 // Our splicing wants consistent timebases...
// Pin the first-seen timebases and rescale everything into them.
373 if (video_pts >= 0) {
374 if (prev_video_timebase.den == 0) {
375 prev_video_timebase = video_timebase;
377 video_pts = av_rescale_q(video_pts, video_timebase, prev_video_timebase);
378 video_timebase = prev_video_timebase;
380 if (audio_pts >= 0) {
381 if (prev_audio_timebase.den == 0) {
382 prev_audio_timebase = audio_timebase;
384 audio_pts = av_rescale_q(audio_pts, audio_timebase, prev_audio_timebase);
385 audio_timebase = prev_audio_timebase;
// Shift into the spliced output timeline.
389 video_pts += video_pts_offset;
391 audio_pts += audio_pts_offset;
// A backwards jump means the capture moved on to the next input file:
// rebase the offsets so output pts continue monotonically.
392 if ((video_pts >= 0 && video_pts < next_video_pts) || (audio_pts >= 0 && audio_pts < next_audio_pts)) {
393 printf("=== next file (%ld < %ld || %ld < %ld) ===\n", video_pts, next_video_pts, audio_pts, next_audio_pts);
395 video_pts -= video_pts_offset;
397 audio_pts -= audio_pts_offset;
398 video_pts_offset = next_video_pts;
399 audio_pts_offset = next_audio_pts;
401 video_pts += video_pts_offset;
403 audio_pts += audio_pts_offset;
// Absolute wall-clock times for crop decisions and logging.
406 double cur_video_time = video_start_time + video_pts * double(video_timebase.num) / double(video_timebase.den);
407 double cur_audio_time = video_start_time + audio_pts * double(audio_timebase.num) / double(audio_timebase.den);
// Time-of-day log string; +7200 looks like a fixed UTC+2 offset — TODO confirm.
410 int doh = uint64_t(cur_video_time + 7200) % 86400;
411 snprintf(ts, 256, "%02d:%02d:%02d", doh / 3600, (doh % 3600) / 60, doh % 60);
414 if (!within(cur_video_time)) {
415 printf("%s [skip]\n", ts);
417 if (cur_msg < bodet_msgs.size()) {
418 printf("%s %s\n", ts, bodet_msgs[cur_msg].msg.c_str());
// Past the crop window: ask the main loop to shut everything down.
426 if (video_pts >= 0 && cur_video_time > crop_end) {
427 printf("=== sending quit signal ===\n");
// --- Video path ---
431 if (video_pts >= 0 && video_frame.len > 0 && within(cur_video_time)) {
432 ReceivedTimestamps ts;
433 ts.ts.push_back(steady_clock::now());
435 //next_video_pts = video_pts + av_rescale_q(1, AVRational{ 1001, 60000 }, video_timebase);
437 // I hate Matroska timestamps and their inaccuracy...
// Expect the next frame no earlier than this (lenient half-frame step).
438 next_video_pts = video_pts + av_rescale_q(1, AVRational{ 500, 60000 }, video_timebase);
440 video_pts = av_rescale_q(video_pts, video_timebase, AVRational{ 1, TIMEBASE });
441 int64_t frame_duration = int64_t(TIMEBASE) * video_format.frame_rate_den / video_format.frame_rate_nom;
// team1 == "nocef" is a magic value that disables the overlay entirely.
442 if (team1 != "nocef") {
443 add_cef(video_frame.data + video_offset, video_format.width, video_format.height, video_pts, video_timebase);
// BGRA -> NV12 into the global vfd buffer, then hand to x264.
445 convert_stuff(video_format, video_frame.data + video_offset);
446 x264_encoder->add_frame(video_pts, frame_duration, video->get_current_frame_ycbcr_format().luma_coefficients, vfd, ts);
448 // x264_encoder->add_frame(video_pts, frame_duration, video->get_current_frame_ycbcr_format().luma_coefficients, video_frame.data + video_offset, ts);
450 global_basic_stats->update(frame_num++, /*dropped_frames=*/0);
// --- Audio path: convert interleaved PCM to float for the encoder ---
452 if (audio_frame.len > 0 && within(cur_audio_time)) {
453 // FFmpegCapture takes care of this for us.
454 assert(audio_format.num_channels == 2);
455 assert(audio_format.sample_rate == OUTPUT_FREQUENCY);
457 // TODO: Reduce some duplication against AudioMixer here.
// num_samples counts individual (per-channel) samples, not frames.
458 size_t num_samples = audio_frame.len / (audio_format.bits_per_sample / 8);
459 vector<float> float_samples;
460 float_samples.resize(num_samples);
462 if (audio_format.bits_per_sample == 16) {
463 const int16_t *src = (const int16_t *)audio_frame.data;
464 float *dst = &float_samples[0];
465 for (size_t i = 0; i < num_samples; ++i) {
// Little-endian s16 -> [-1, 1) float.
466 *dst++ = int16_t(le16toh(*src++)) * (1.0f / 32768.0f);
468 } else if (audio_format.bits_per_sample == 32) {
469 const int32_t *src = (const int32_t *)audio_frame.data;
470 float *dst = &float_samples[0];
471 for (size_t i = 0; i < num_samples; ++i) {
472 *dst++ = int32_t(le32toh(*src++)) * (1.0f / 2147483648.0f);
477 //next_audio_pts = audio_pts + av_rescale_q(num_samples / 2, AVRational{ 1, OUTPUT_FREQUENCY }, audio_timebase);
// NOTE(review): /4 here vs /2 in the commented-out line above — for stereo,
// frames = num_samples / 2, so verify which divisor is intended.
479 next_audio_pts = audio_pts + av_rescale_q(num_samples / 4, AVRational{ 1, OUTPUT_FREQUENCY }, audio_timebase);
480 audio_pts = av_rescale_q(audio_pts, audio_timebase, AVRational{ 1, TIMEBASE });
481 audio_encoder->encode_audio(float_samples, audio_pts);
// Return the frames to their allocator.
484 if (video_frame.owner) {
485 video_frame.owner->release_frame(video_frame);
487 if (audio_frame.owner) {
488 audio_frame.owner->release_frame(audio_frame);
492 void raw_packet_callback(Mux *mux, int stream_index, const AVPacket *pkt, AVRational timebase)
494 mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
// Audio-passthrough callback that runs packets through a bitstream filter
// (aac_adtstoasc, set up in main()) before muxing. Non-ADTS packets (no
// 0xFFFx syncword) bypass the filter.
497 void filter_packet_callback(Mux *mux, int stream_index, AVBSFContext *bsfctx, const AVPacket *pkt, AVRational timebase)
// ADTS frames start with a 12-bit syncword of all ones (0xFFF).
499 if (pkt->size <= 2 || pkt->data[0] != 0xff || (pkt->data[1] & 0xf0) != 0xf0) {
500 // Not ADTS data, so just pass it through.
501 mux->add_packet(*pkt, pkt->pts, pkt->dts == AV_NOPTS_VALUE ? pkt->pts : pkt->dts, timebase, stream_index);
// The BSF takes ownership of the packet we send, so feed it a clone.
// NOTE(review): the cleanup uses av_packet_unref, which releases the payload
// but not the AVPacket struct allocated by av_packet_clone — av_packet_free
// would be needed to avoid leaking the struct; verify.
505 AVPacket *in_pkt = av_packet_clone(pkt);
506 unique_ptr<AVPacket, decltype(av_packet_unref) *> in_pkt_cleanup(in_pkt, av_packet_unref);
507 int err = av_bsf_send_packet(bsfctx, in_pkt);
509 fprintf(stderr, "av_bsf_send_packet() failed with %d, ignoring\n", err);
513 unique_ptr<AVPacket, decltype(av_packet_unref) *> pkt_cleanup(&out_pkt, av_packet_unref);
// NOTE(review): av_init_packet() is deprecated in newer FFmpeg; stack AVPacket
// usage would need reworking to av_packet_alloc() when upgrading.
514 av_init_packet(&out_pkt);
515 err = av_bsf_receive_packet(bsfctx, &out_pkt);
// EAGAIN: the filter needs more input before it can produce output.
516 if (err == AVERROR(EAGAIN)) {
520 fprintf(stderr, "av_bsf_receive_packet() failed with %d, ignoring\n", err);
523 mux->add_packet(out_pkt, out_pkt.pts, out_pkt.dts == AV_NOPTS_VALUE ? out_pkt.pts : out_pkt.dts, timebase, stream_index);
// Signal handler: SIGUSR1 raises and SIGUSR2 lowers the x264 bitrate at
// runtime, clamped to [100, 100000] kbit/sec.
// NOTE(review): fprintf and X264Encoder::change_bitrate() are called directly
// from the handler, which is not async-signal-safe — consider deferring to the
// main loop.
527 void adjust_bitrate(int signal)
529 int new_bitrate = global_flags.x264_bitrate;
530 if (signal == SIGUSR1) {
// Upper clamp on increases.
532 if (new_bitrate > 100000) {
// NOTE(review): this is the *increase* path with an upper clamp — the message
// wording ("below") looks off; "beyond 100000" was probably intended.
533 fprintf(stderr, "Ignoring SIGUSR1, can't increase bitrate below 100000 kbit/sec (currently at %d kbit/sec)\n",
534 global_flags.x264_bitrate);
536 fprintf(stderr, "Increasing bitrate to %d kbit/sec due to SIGUSR1.\n", new_bitrate);
537 global_flags.x264_bitrate = new_bitrate;
538 global_x264_encoder->change_bitrate(new_bitrate);
540 } else if (signal == SIGUSR2) {
// Lower clamp on decreases.
542 if (new_bitrate < 100) {
543 fprintf(stderr, "Ignoring SIGUSR2, can't decrease bitrate below 100 kbit/sec (currently at %d kbit/sec)\n",
544 global_flags.x264_bitrate);
546 fprintf(stderr, "Decreasing bitrate to %d kbit/sec due to SIGUSR2.\n", new_bitrate);
547 global_flags.x264_bitrate = new_bitrate;
548 global_x264_encoder->change_bitrate(new_bitrate);
// SIGINT handler: asks the main loop (should_quit) to shut down cleanly
// (body outside this excerpt).
553 void request_quit(int signal)
// Fragment of a hex-digit parser: uppercase 'A'..'F' map to 10..15 (the
// other digit ranges are outside this excerpt). Used by main() to decode the
// hex-encoded Bodet message payloads.
562 } else if (ch >= 'A' && ch <= 'F') {
563 return 10 + (ch - 'A');
// Parses a "YYYY-MM-DD HH:MM:SS" timestamp (used for the crop_start/crop_end
// arguments) into seconds, via strptime into a struct tm.
// NOTE(review): strptime()'s return value is unchecked, and `tm` should be
// zero-initialized before use — verify in the full function body.
571 double parse_time(const char *str)
574 strptime(str, "%Y-%m-%d %H:%M:%S", &tm);
// Splits `str` on `delim`, skipping empty fields (find_first_not_of collapses
// runs of consecutive delimiters). Used in main() to split the ':'-separated
// input filename list.
579 std::vector<std::string> split(const std::string& str, char delim) {
580 std::vector<std::string> strings;
583 while ((start = str.find_first_not_of(delim, end)) != std::string::npos) {
584 end = str.find(delim, start);
585 strings.push_back(str.substr(start, end - start));
// Entry point. Usage (positional, after flags):
//   kaeru <file[:file...]> <start_time> <msgfile> <team1> <team2> <color1> <color2>
//         [crop_start] [crop_end] [output_filename]
590 int main(int argc, char *argv[])
// CEF subprocess bootstrap: when relaunched as a CEF helper process, this
// call runs that process and we must not continue as the main program.
592 CefMainArgs main_args(argc, argv);
593 cef_app = CefRefPtr<NageruCefApp>(new NageruCefApp());
594 int err = CefExecuteProcess(main_args, cef_app.get(), nullptr);
599 // CEF wants to use GLib for its main loop, which interferes with Qt's use of it.
600 // The alternative is trying to integrate CEF into Qt's main loop, but that requires
601 // fairly extensive cross-thread communication and that parts of CEF runs on Qt's UI
603 setenv("QT_NO_GLIB", "1", 0);
605 parse_flags(PROGRAM_KAERU, argc, argv);
// Positional arguments (see usage above); 7..9 are optional.
607 video_start_time = atof(argv[optind + 1]);
608 team1 = argv[optind + 3];
609 team2 = argv[optind + 4];
610 team1color = argv[optind + 5];
611 team2color = argv[optind + 6];
612 if (argc > optind + 7) crop_start = parse_time(argv[optind + 7]);
613 if (argc > optind + 8) crop_end = parse_time(argv[optind + 8]);
614 if (argc > optind + 9) output_filename = argv[optind + 9];
615 //printf("crop= %f %f\n", crop_start, crop_end);
// Load the Bodet scoreboard message file: each line is "<time>,<hexpayload>".
// NOTE(review): fopen() result is not null-checked before use.
618 FILE *msgfp = fopen(argv[optind + 2], "r");
619 while (!feof(msgfp)) {
621 if (fscanf(msgfp, "%lf,%s", &t, msgbuf) != 2) break;
// Messages from before the video started are irrelevant.
624 if (t < video_start_time) {
// Decode the hex payload (two hex chars per byte, skipping the first byte).
627 for (unsigned i = 1; i < strlen(msgbuf) / 2; ++i) {
628 bm.msg.push_back(hex(msgbuf[i * 2]) * 16 + hex(msgbuf[i * 2 + 1]));
630 bodet_msgs.push_back(bm);
631 printf("%.3f %s\n", t, bm.msg.c_str());
635 global_flags.max_num_cards = 1; // For latency metrics.
// Only needed on pre-58.9.100 libavformat (register_all era).
637 #if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
640 avformat_network_init();
// Pick the output container and build encoders + mux.
644 const AVOutputFormat *oformat = av_guess_format(global_flags.stream_mux_name.c_str(), nullptr, nullptr);
645 assert(oformat != nullptr);
647 unique_ptr<AudioEncoder> audio_encoder;
648 if (global_flags.stream_audio_codec_name.empty()) {
649 audio_encoder.reset(new AudioEncoder(AUDIO_OUTPUT_CODEC_NAME, DEFAULT_AUDIO_OUTPUT_BIT_RATE, oformat));
651 audio_encoder.reset(new AudioEncoder(global_flags.stream_audio_codec_name, global_flags.stream_audio_codec_bitrate, oformat));
654 unique_ptr<X264Encoder> x264_encoder(new X264Encoder(oformat, /*use_separate_disk_params=*/false));
655 unique_ptr<Mux> http_mux = create_mux(&httpd, oformat, x264_encoder.get(), audio_encoder.get());
656 if (global_flags.transcode_audio) {
657 audio_encoder->add_mux(http_mux.get());
659 if (global_flags.transcode_video) {
660 x264_encoder->add_mux(http_mux.get());
662 global_x264_encoder = x264_encoder.get();
// Input: one or more files to splice, ':'-separated.
664 vector<string> filenames = split(argv[optind], ':');
666 FFmpegCapture video(filenames, global_flags.width, global_flags.height);
667 video.set_pixel_format(bmusb::PixelFormat_8BitBGRA);
// Transcode path: decoded frames go through video_frame_callback();
// otherwise raw packets are passed straight to the mux.
668 if (global_flags.transcode_video) {
669 video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
671 video.set_video_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/0, _1, _2));
// Audio passthrough may need ADTS -> ASC conversion for mp4 + AAC.
673 if (!global_flags.transcode_audio && global_flags.enable_audio) {
674 AVBSFContext *bsfctx = nullptr;
675 if (strcmp(oformat->name, "mp4") == 0 && strcmp(audio_encoder->get_codec()->name, "aac") == 0) {
676 // We need to insert the aac_adtstoasc filter, seemingly (or we will get warnings to do so).
677 const AVBitStreamFilter *filter = av_bsf_get_by_name("aac_adtstoasc");
678 int err = av_bsf_alloc(filter, &bsfctx);
680 fprintf(stderr, "av_bsf_alloc() failed with %d\n", err);
684 if (bsfctx == nullptr) {
685 video.set_audio_callback(bind(raw_packet_callback, http_mux.get(), /*stream_index=*/1, _1, _2));
687 video.set_audio_callback(bind(filter_packet_callback, http_mux.get(), /*stream_index=*/1, bsfctx, _1, _2));
// Start capturing; change_rate(10.0) makes FFmpegCapture run unthrottled.
690 video.configure_card();
691 video.start_bm_capture();
692 video.change_rate(10.0); // Play as fast as possible.
694 BasicStats basic_stats(/*verbose=*/false, /*use_opengl=*/false);
695 global_basic_stats = &basic_stats;
696 httpd.start(global_flags.http_port);
// Runtime controls: bitrate up/down and clean shutdown.
698 signal(SIGUSR1, adjust_bitrate);
699 signal(SIGUSR2, adjust_bitrate);
700 signal(SIGINT, request_quit);
// Idle until asked to quit (by SIGINT or by passing crop_end).
702 while (!should_quit.should_quit()) {
703 should_quit.sleep_for(hours(1000));
706 video.stop_dequeue_thread();
707 // Stop the x264 encoder before killing the mux it's writing to.
708 global_x264_encoder = nullptr;
709 x264_encoder.reset();