3 Copyright (C) 2005, 2007, 2008 Red Hat, Inc. All rights reserved.
5 This copyrighted material is made available to anyone wishing to use, modify,
6 copy, or redistribute it subject to the terms and conditions of the GNU General
9 This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 Author: Miloslav Trmac <mitr@redhat.com>
20 plocate modifications: Copyright (C) 2020 Steinar H. Gunderson.
21 plocate parts and modifications are licensed under the GPLv2 or, at your option,
25 #include "bind-mount.h"
26 #include "complete_pread.h"
28 #include "database-builder.h"
31 #include "io_uring_engine.h"
35 #include <arpa/inet.h>
53 #include <sys/resource.h>
56 #include <sys/types.h>
using namespace std::chrono;

/* Next conf_prunepaths entry */
// Cursor passed by pointer to string_list_contains_dir_path() in scan();
// presumably tracks where the previous prunepaths match left off — confirm
// against the helper's definition (not visible in this excerpt).
static size_t conf_prunepaths_index; /* = 0; */
// Body of usage(): the command-line help text. The enclosing function header
// and printf( call are not visible in this excerpt; these adjacent string
// literals are concatenated into one format string.
"Usage: updatedb PLOCATE_DB\n"
"Generate plocate index from mlocate.db, typically /var/lib/mlocate/mlocate.db.\n"
"Normally, the destination should be /var/lib/mlocate/plocate.db.\n"
" -b, --block-size SIZE number of filenames to store in each block (default 32)\n"
" -p, --plaintext input is a plaintext file, not an mlocate database\n"
" --help print this help\n"
" --version print version information\n");
// Body of version(): prints the program version and license notice to stdout.
// The enclosing function header is not visible in this excerpt.
printf("updatedb %s\n", PACKAGE_VERSION);
printf("Copyright (C) 2007 Red Hat, Inc. All rights reserved.\n");
printf("Copyright 2020 Steinar H. Gunderson\n");
printf("This software is distributed under the GPL v.2.\n");
printf("This program is provided with NO WARRANTY, to the extent permitted by law.\n");
// Opens the directory "path" (relative to "dirfd") read-only, preferring
// O_NOATIME so that indexing does not dirty access times. O_NOATIME needs
// file ownership or CAP_FOWNER and otherwise fails with EPERM; on the first
// such failure we latch noatime_failed and stop trying, since it would fail
// for every other directory too. Returns the new fd, or -1 with errno set.
// Fix vs. the excerpt: restores the success return, the EPERM fall-through
// to the plain retry, and the -1 return for all other errors.
int opendir_noatime(int dirfd, const char *path)
{
	static bool noatime_failed = false;

	if (!noatime_failed) {
		int fd = openat(dirfd, path, O_RDONLY | O_DIRECTORY | O_NOATIME);
		if (fd != -1) {
			return fd;
		} else if (errno == EPERM) {
			/* EPERM is fairly O_NOATIME-specific; missing access rights cause
			   EACCES. */
			noatime_failed = true;
			// Fall through to retrying without O_NOATIME.
		} else {
			return -1;
		}
	}
	return openat(dirfd, path, O_RDONLY | O_DIRECTORY);
}
115 bool time_is_current(const dir_time &t)
117 static dir_time cache{ 0, 0 };
119 /* This is more difficult than it should be because Linux uses a cheaper time
120 source for filesystem timestamps than for gettimeofday() and they can get
121 slightly out of sync, see
122 https://bugzilla.redhat.com/show_bug.cgi?id=244697 . This affects even
123 nanosecond timestamps (and don't forget that tv_nsec existence doesn't
124 guarantee that the underlying filesystem has such resolution - it might be
125 microseconds or even coarser).
127 The worst case is probably FAT timestamps with 2-second resolution
128 (although using such a filesystem violates POSIX file times requirements).
130 So, to be on the safe side, require a >3.0 second difference (2 seconds to
131 make sure the FAT timestamp changed, 1 more to account for the Linux
132 timestamp races). This large margin might make updatedb marginally more
133 expensive, but it only makes a difference if the directory was very
134 recently updated _and_ is will not be updated again until the next
135 updatedb run; this is not likely to happen for most directories. */
137 /* Cache gettimeofday () results to rule out obviously old time stamps;
138 CACHE contains the earliest time we reject as too current. */
144 gettimeofday(&tv, nullptr);
145 cache.sec = tv.tv_sec - 3;
146 cache.nsec = tv.tv_usec * 1000;
	// For directories only:
	dir_time dt = unknown_dir_time;  // newer of ctime/mtime from stat; set via get_dirtime_from_stat().
	dir_time db_modified = unknown_dir_time;  // modification time recorded for this dir in the old database.
162 bool filesystem_is_excluded(const string &path)
164 if (conf_debug_pruning) {
165 fprintf(stderr, "Checking whether filesystem `%s' is excluded:\n", path.c_str());
167 FILE *f = setmntent("/proc/mounts", "r");
173 while ((me = getmntent(f)) != nullptr) {
174 if (conf_debug_pruning) {
175 fprintf(stderr, " `%s', type `%s'\n", me->mnt_dir, me->mnt_type);
177 if (path != me->mnt_dir) {
180 string type(me->mnt_type);
181 for (char &p : type) {
184 bool exclude = (find(conf_prunefs.begin(), conf_prunefs.end(), type) != conf_prunefs.end());
185 if (exclude && conf_debug_pruning) {
186 fprintf(stderr, " => excluded due to filesystem type\n");
191 if (conf_debug_pruning) {
192 fprintf(stderr, "...not found in mount list\n");
198 dir_time get_dirtime_from_stat(const struct stat &buf)
200 dir_time ctime{ buf.st_ctim.tv_sec, int32_t(buf.st_ctim.tv_nsec) };
201 dir_time mtime{ buf.st_mtim.tv_sec, int32_t(buf.st_mtim.tv_nsec) };
202 dir_time dt = max(ctime, mtime);
204 if (time_is_current(dt)) {
205 /* The directory might be changing right now and we can't be sure the
206 timestamp will be changed again if more changes happen very soon, mark
207 the timestamp as invalid to force rescanning the directory next time
209 return unknown_dir_time;
// Represents the old database we are updating.
// NOTE(review): the class head, access specifiers, and some members (e.g.
// the fd, the header struct `hdr`, the one-shot ZSTD_DCtx `ctx`) are elided
// in this excerpt; only the lines below survived sampling.
explicit ExistingDB(int fd);

// Returns the next (path, dir_time) record, or an empty path at EOF/error.
pair<string, dir_time> read_next();
// Pushes one record back; the next read_next() returns it first.
void unread(pair<string, dir_time> record)
unread_record = move(record);
string read_next_dictionary() const;
bool get_error() const { return error; }

uint32_t current_docid = 0;  // Next filename block (docid) to load.

// Current decompressed, NUL-separated filename block and a cursor into it.
string current_filename_block;
const char *current_filename_ptr = nullptr, *current_filename_end = nullptr;

off_t compressed_dir_time_pos;  // File offset of the next directory-time bytes to pread().
string compressed_dir_time;     // Compressed directory-time bytes not yet fed to zstd.
string current_dir_time_block;  // Decompressed directory-time bytes pending consumption.
const char *current_dir_time_ptr = nullptr, *current_dir_time_end = nullptr;

pair<string, dir_time> unread_record;  // Single-record pushback buffer for unread().

// Used in one-shot mode, repeatedly.
// Used in streaming mode.
ZSTD_DCtx *dir_time_ctx;

ZSTD_DDict *ddict = nullptr;  // Optional decompression dictionary from the old database.

// If true, we've discovered an error or EOF, and will return only
// empty data from here.
bool eof = false, error = false;
// Opens and validates the old plocate database read from "fd". If the magic,
// version, or configuration block does not match the current configuration,
// the old database is ignored so that everything gets rescanned. Otherwise,
// sets up the zstd contexts (plus the optional dictionary) for reading it.
// NOTE(review): the error paths (flag updates and early returns) and some
// local declarations (e.g. `string str`, the dictionary buffer) are elided
// in this excerpt.
ExistingDB::ExistingDB(int fd)
	if (!try_complete_pread(fd, &hdr, sizeof(hdr), /*offset=*/0)) {
		perror("pread(header)");

	if (memcmp(hdr.magic, "\0plocate", 8) != 0) {
		fprintf(stderr, "Old database had header mismatch, ignoring.\n");

	// Reject formats we cannot read back (the version checks below).
	if (hdr.version != 1 || hdr.max_version < 2) {
		fprintf(stderr, "Old database had version mismatch (version=%d max_version=%d), ignoring.\n",
		        hdr.version, hdr.max_version);

	// Compare the configuration block with our current one.
	if (hdr.conf_block_length_bytes != conf_block.size()) {
		fprintf(stderr, "Old database had different configuration block (size mismatch), ignoring.\n");
	str.resize(hdr.conf_block_length_bytes);
	if (!try_complete_pread(fd, str.data(), hdr.conf_block_length_bytes, hdr.conf_block_offset_bytes)) {
		perror("pread(conf_block)");
	if (str != conf_block) {
		fprintf(stderr, "Old database had different configuration block (contents mismatch), ignoring.\n");

	// Read dictionary, if it exists.
	if (hdr.zstd_dictionary_length_bytes > 0) {
		dictionary.resize(hdr.zstd_dictionary_length_bytes);
		if (try_complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes)) {
			ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
			perror("pread(dictionary)");

	// Position the directory-time stream at the start of its data.
	compressed_dir_time_pos = hdr.directory_data_offset_bytes;

	ctx = ZSTD_createDCtx();
	dir_time_ctx = ZSTD_createDCtx();
// Destructor. NOTE(review): the body is elided in this excerpt; presumably it
// releases the fd and the zstd contexts/dictionary created above — confirm
// against the full source.
ExistingDB::~ExistingDB()
// Returns the next (filename, dir_time) record from the old database, or an
// empty filename with not_a_dir at EOF/error. Filenames come from per-docid
// zstd-compressed, NUL-separated blocks; directory times come from a separate
// streaming-decompressed byte stream, where a zero marker byte means "not a
// directory" and a nonzero marker is followed by raw sec/nsec fields.
// NOTE(review): many lines are elided in this excerpt — the eof/error flag
// updates before the early returns, closing braces, and declarations such as
// `uint64_t vals[2]`, `string block`, `size_t err`, `ZSTD_inBuffer inbuf`,
// the read buffer `buf`, and `dir_time dt`.
pair<string, dir_time> ExistingDB::read_next()
	// First hand back any record pushed back via unread().
	if (!unread_record.first.empty()) {
		auto ret = move(unread_record);
		unread_record.first.clear();
	return { "", not_a_dir };

	// See if we need to read a new filename block.
	if (current_filename_ptr == nullptr) {
		if (current_docid >= hdr.num_docids) {
		return { "", not_a_dir };

		// Read the file offset from this docid and the next one.
		// This is always allowed, since we have a sentinel block at the end.
		off_t offset_for_block = hdr.filename_index_offset_bytes + current_docid * sizeof(uint64_t);
		if (!try_complete_pread(fd, vals, sizeof(vals), offset_for_block)) {
			perror("pread(offset)");
			return { "", not_a_dir };

		off_t offset = vals[0];
		size_t compressed_len = vals[1] - vals[0];
		unique_ptr<char[]> compressed(new char[compressed_len]);
		if (!try_complete_pread(fd, compressed.get(), compressed_len, offset)) {
			perror("pread(block)");
			return { "", not_a_dir };

		unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.get(), compressed_len);
		if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
			fprintf(stderr, "ZSTD_getFrameContentSize() failed\n");
			return { "", not_a_dir };

		// One extra byte, so the block can always be NUL-terminated below.
		block.resize(uncompressed_len + 1);

		if (ddict != nullptr) {
			err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.get(),
			                                 compressed_len, ddict);
			err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.get(),
		if (ZSTD_isError(err)) {
			fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
			return { "", not_a_dir };
		block[block.size() - 1] = '\0';
		current_filename_block = move(block);
		current_filename_ptr = current_filename_block.data();
		current_filename_end = current_filename_block.data() + current_filename_block.size();

	// See if we need to read more directory time data.
	// Loop until the cursor has either a zero marker byte available, or a
	// complete record (marker byte plus sizeof(dir_time) payload).
	while (current_dir_time_ptr == current_dir_time_end ||
	       (*current_dir_time_ptr != 0 &&
	        size_t(current_dir_time_end - current_dir_time_ptr) < sizeof(dir_time) + 1)) {
		if (current_dir_time_ptr != nullptr) {
			// Drop already-consumed bytes from the front of the buffer.
			const size_t bytes_consumed = current_dir_time_ptr - current_dir_time_block.data();
			current_dir_time_block.erase(current_dir_time_block.begin(), current_dir_time_block.begin() + bytes_consumed);

		// See if we can get more data out without reading more.
		const size_t existing_data = current_dir_time_block.size();
		current_dir_time_block.resize(existing_data + 4096);

		ZSTD_outBuffer outbuf;
		outbuf.dst = current_dir_time_block.data() + existing_data;
		inbuf.src = compressed_dir_time.data();
		inbuf.size = compressed_dir_time.size();

		int err = ZSTD_decompressStream(dir_time_ctx, &outbuf, &inbuf);
			fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
			return { "", not_a_dir };
		compressed_dir_time.erase(compressed_dir_time.begin(), compressed_dir_time.begin() + inbuf.pos);
		current_dir_time_block.resize(existing_data + outbuf.pos);

		if (inbuf.pos == 0 && outbuf.pos == 0) {
			// No movement, we'll need to try to read more data.
			size_t bytes_to_read = min<size_t>(
				hdr.directory_data_offset_bytes + hdr.directory_data_length_bytes - compressed_dir_time_pos,
			if (bytes_to_read == 0) {
			return { "", not_a_dir };
			if (!try_complete_pread(fd, buf, bytes_to_read, compressed_dir_time_pos)) {
				perror("pread(dirtime)");
				return { "", not_a_dir };
			compressed_dir_time_pos += bytes_to_read;
			compressed_dir_time.insert(compressed_dir_time.end(), buf, buf + bytes_to_read);

			// Next iteration will now try decompressing more.
		current_dir_time_ptr = current_dir_time_block.data();
		current_dir_time_end = current_dir_time_block.data() + current_dir_time_block.size();

	// Consume one NUL-terminated filename from the current block.
	string filename = current_filename_ptr;
	current_filename_ptr += filename.size() + 1;
	if (current_filename_ptr == current_filename_end) {
		// End of this block.
		current_filename_ptr = nullptr;

	// Zero marker byte: this record is not a directory.
	if (*current_dir_time_ptr == 0) {
		++current_dir_time_ptr;
		return { move(filename), not_a_dir };
	// Nonzero marker: the raw sec and nsec fields follow.
	++current_dir_time_ptr;
	memcpy(&dt.sec, current_dir_time_ptr, sizeof(dt.sec));
	current_dir_time_ptr += sizeof(dt.sec);
	memcpy(&dt.nsec, current_dir_time_ptr, sizeof(dt.nsec));
	current_dir_time_ptr += sizeof(dt.nsec);
	return { move(filename), dt };
499 string ExistingDB::read_next_dictionary() const
501 if (hdr.next_zstd_dictionary_length_bytes == 0 || hdr.next_zstd_dictionary_length_bytes > 1048576) {
505 str.resize(hdr.next_zstd_dictionary_length_bytes);
506 if (!try_complete_pread(fd, str.data(), hdr.next_zstd_dictionary_length_bytes, hdr.next_zstd_dictionary_offset_bytes)) {
508 perror("pread(next_dictionary)");
515 // Scans the directory with absolute path “path”, which is opened as “fd”.
516 // Uses relative paths and openat() only, evading any issues with PATH_MAX
517 // and time-of-check-time-of-use race conditions. (mlocate's updatedb
518 // does a much more complicated dance with changing the current working
519 // directory, probably in the interest of portability to old platforms.)
520 // “parent_dev” must be the device of the parent directory of “path”.
522 // Takes ownership of fd.
// Recursively scans the directory "path" (already opened as "fd"), feeding
// every entry to "corpus" and "dict_builder", then recursing into
// subdirectories. "parent_dev" detects filesystem crossings; "modified" vs.
// "db_modified" lets us replace readdir() with the old database's contents
// when the directory is unchanged. Takes ownership of fd.
// NOTE(review): a large number of lines are elided in this excerpt — closing
// braces, `return 0`/`return -1` statements, `continue`s, the surrounding
// loops for the old-database reads, and declarations such as `entry e`,
// `DIR *dir`, `dirent *de`, `struct stat buf`, and `struct rlimit rlim`.
int scan(const string &path, int fd, dev_t parent_dev, dir_time modified, dir_time db_modified, ExistingDB *existing_db, DatabaseReceiver *corpus, DictionaryBuilder *dict_builder)
	// Pruning: skip configured path prefixes, then (optionally) bind mounts.
	if (string_list_contains_dir_path(&conf_prunepaths, &conf_prunepaths_index, path)) {
		if (conf_debug_pruning) {
			fprintf(stderr, "Skipping `%s': in prunepaths\n", path.c_str());
	if (conf_prune_bind_mounts && is_bind_mount(path.c_str())) {
		if (conf_debug_pruning) {
			fprintf(stderr, "Skipping `%s': bind mount\n", path.c_str());

	// We read in the old directory no matter whether it is current or not,
	// because even if we're not going to use it, we'll need the modification directory
	// of any subdirectories.

	// Skip over anything before this directory; it is stuff that we would have
	// consumed earlier if we wanted it.
	pair<string, dir_time> record = existing_db->read_next();
	if (record.first.empty()) {
	if (dir_path_cmp(path, record.first) <= 0) {
		existing_db->unread(move(record));

	// Now read everything in this directory.
	vector<entry> db_entries;
	const string path_plus_slash = path.back() == '/' ? path : path + '/';
	pair<string, dir_time> record = existing_db->read_next();
	if (record.first.empty()) {
	if (record.first.rfind(path_plus_slash, 0) != 0) {
		// No longer starts with path, so we're in a different directory.
		existing_db->unread(move(record));
	if (record.first.find_first_of('/', path_plus_slash.size()) != string::npos) {
		// Entered into a subdirectory of a subdirectory.
		// Due to our ordering, this also means we're done.
		existing_db->unread(move(record));
	// A nonnegative sec value marks the old record as a directory.
	e.name = record.first.substr(path_plus_slash.size());
	e.is_directory = (record.second.sec >= 0);
	e.db_modified = record.second;
	db_entries.push_back(e);

	vector<entry> entries;
	if (!existing_db->get_error() && db_modified.sec > 0 &&
	    modified.sec == db_modified.sec && modified.nsec == db_modified.nsec) {
		// Not changed since the last database, so we can replace the readdir()
		// by reading from the database. (We still need to open and stat everything,
		// though, but that happens in a later step.)
		entries = move(db_entries);
		for (const entry &e : entries) {
			printf("%s/%s\n", path.c_str(), e.name.c_str());
	dir = fdopendir(fd); // Takes over ownership of fd.
	if (dir == nullptr) {
		// fdopendir() wants to fstat() the fd to verify that it's indeed
		// a directory, which can seemingly fail on at least CIFS filesystems
		// if the server feels like it. We treat this as if we had an error
		// on opening it, ie., ignore the directory.
	while ((de = readdir(dir)) != nullptr) {
		if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) {
		if (strlen(de->d_name) == 0) {
			/* Unfortunately, this does happen, and mere assert() does not give
			   users enough information to complain to the right people. */
			fprintf(stderr, "file system error: zero-length file name in directory %s", path.c_str());
		if (de->d_type == DT_UNKNOWN) {
			// Evidently some file systems, like older versions of XFS
			// (mkfs.xfs -m crc=0 -n ftype=0), can return this,
			// and we need a stat(). If we wanted to optimize for this,
			// we could probably defer it to later (we're stat-ing directories
			// when recursing), but this is rare, and not really worth it --
			// the second stat() will be cached anyway.
			if (fstatat(fd, de->d_name, &buf, AT_SYMLINK_NOFOLLOW) == 0 &&
			    S_ISDIR(buf.st_mode)) {
				e.is_directory = true;
			e.is_directory = false;
		e.is_directory = (de->d_type == DT_DIR);
		printf("%s/%s\n", path.c_str(), de->d_name);
		entries.push_back(move(e));

	sort(entries.begin(), entries.end(), [](const entry &a, const entry &b) {
		return a.name < b.name;

	// Load directory modification times from the old database.
	// Both lists are sorted by name, so this is a linear merge.
	auto db_it = db_entries.begin();
	for (entry &e : entries) {
		for (; db_it != db_entries.end(); ++db_it) {
			if (e.name < db_it->name) {
			if (e.name == db_it->name) {
				e.db_modified = db_it->db_modified;

	// For each entry, we want to add it to the database. but this includes the modification time
	// for directories, which means we need to open and stat it at this point.
	//
	// This means we may need to have many directories open at the same time, but it seems to be
	// the simplest (only?) way of being compatible with mlocate's notion of listing all contents
	// of a given directory before recursing, without buffering even more information. Hopefully,
	// we won't go out of file descriptors here (it could happen if someone has tens of thousands
	// of subdirectories in a single directory); if so, the admin will need to raise the limit.
	for (entry &e : entries) {
		if (!e.is_directory) {
		if (find(conf_prunenames.begin(), conf_prunenames.end(), e.name) != conf_prunenames.end()) {
			if (conf_debug_pruning) {
				fprintf(stderr, "Skipping `%s': in prunenames\n", e.name.c_str());
		e.fd = opendir_noatime(fd, e.name.c_str());
		if (errno == EMFILE || errno == ENFILE) {
			// The admin probably wants to know about this.
			perror((path_plus_slash + e.name).c_str());
			if (getrlimit(RLIMIT_NOFILE, &rlim) == -1) {
				fprintf(stderr, "Hint: Try `ulimit -n 131072' or similar.\n");
			fprintf(stderr, "Hint: Try `ulimit -n %" PRIu64 " or similar (current limit is %" PRIu64 ").\n",
			        static_cast<uint64_t>(rlim.rlim_cur * 2), static_cast<uint64_t>(rlim.rlim_cur));
		if (fstat(e.fd, &buf) != 0) {
			// It's possible that this is a filesystem that's excluded
			// (and the failure is e.g. because the network is down).
			// As a last-ditch effort, we try to check that before dying,
			// i.e., duplicate the check from further down.
			//
			// It would be better to be able to run filesystem_is_excluded()
			// for cheap on everything and just avoid the stat, but it seems
			// hard to do that without any kind of raciness.
			if (filesystem_is_excluded(path_plus_slash + e.name)) {
			perror((path_plus_slash + e.name).c_str());
		// Crossing onto a different device: check the filesystem exclusions.
		if (buf.st_dev != parent_dev) {
			if (filesystem_is_excluded(path_plus_slash + e.name)) {
		e.dt = get_dirtime_from_stat(buf);

	// Actually add all the entries we figured out dates for above.
	for (const entry &e : entries) {
		corpus->add_file(path_plus_slash + e.name, e.dt);
		dict_builder->add_file(path_plus_slash + e.name, e.dt);

	// Now scan subdirectories.
	for (const entry &e : entries) {
		if (e.is_directory && e.fd != -1) {
			int ret = scan(path_plus_slash + e.name, e.fd, e.dev, e.dt, e.db_modified, existing_db, corpus, dict_builder);
			// TODO: The unscanned file descriptors will leak, but it doesn't really matter,
			// as we're about to exit.
	if (dir == nullptr) {
764 int main(int argc, char **argv)
766 // We want to bump the file limit; do it if we can (usually we are root
767 // and can set whatever we want). 128k should be ample for most setups.
769 if (getrlimit(RLIMIT_NOFILE, &rlim) != -1) {
770 // Even root cannot increase rlim_cur beyond rlim_max,
771 // so we need to try to increase rlim_max first.
772 // Ignore errors, though.
773 if (rlim.rlim_max < 131072) {
774 rlim.rlim_max = 131072;
775 setrlimit(RLIMIT_NOFILE, &rlim);
776 getrlimit(RLIMIT_NOFILE, &rlim);
779 rlim_t wanted = std::max<rlim_t>(rlim.rlim_cur, 131072);
780 rlim.rlim_cur = std::min<rlim_t>(wanted, rlim.rlim_max);
781 setrlimit(RLIMIT_NOFILE, &rlim); // Ignore errors.
784 conf_prepare(argc, argv);
785 if (conf_prune_bind_mounts) {
789 int fd = open(conf_output.c_str(), O_RDONLY);
790 ExistingDB existing_db(fd);
792 DictionaryBuilder dict_builder(/*blocks_to_keep=*/1000, conf_block_size);
795 if (conf_check_visibility) {
796 group *grp = getgrnam(GROUPNAME);
797 if (grp == nullptr) {
798 fprintf(stderr, "Unknown group %s\n", GROUPNAME);
804 DatabaseBuilder db(conf_output.c_str(), owner, conf_block_size, existing_db.read_next_dictionary(), conf_check_visibility);
805 db.set_conf_block(conf_block);
806 DatabaseReceiver *corpus = db.start_corpus(/*store_dir_times=*/true);
808 int root_fd = opendir_noatime(AT_FDCWD, conf_scan_root);
815 if (fstat(root_fd, &buf) == -1) {
820 scan(conf_scan_root, root_fd, buf.st_dev, get_dirtime_from_stat(buf), /*db_modified=*/unknown_dir_time, &existing_db, corpus, &dict_builder);
822 // It's too late to use the dictionary for the data we already compressed,
823 // unless we wanted to either scan the entire file system again (acceptable
824 // for plocate-build where it's cheap, less so for us), or uncompressing
825 // and recompressing. Instead, we store it for next time, assuming that the
826 // data changes fairly little from time to time.
827 string next_dictionary = dict_builder.train(1024);
828 db.set_next_dictionary(next_dictionary);