Run clang-format.
diff --git a/plocate.cpp b/plocate.cpp
index de7bd7a0eafc1073b1bfdc72b53166f88d0535ea..c270e79eff24f99481dca08ae47b4cc7abdf6651 100644
--- a/plocate.cpp
+++ b/plocate.cpp
@@ -1,21 +1,28 @@
 #include "db.h"
+#include "dprintf.h"
 #include "io_uring_engine.h"
 #include "parse_trigrams.h"
+#include "serializer.h"
 #include "turbopfor.h"
 #include "unique_sort.h"
 
 #include <algorithm>
 #include <assert.h>
+#include <atomic>
 #include <chrono>
+#include <condition_variable>
+#include <deque>
 #include <fcntl.h>
 #include <fnmatch.h>
 #include <functional>
 #include <getopt.h>
-#include <iosfwd>
+#include <inttypes.h>
 #include <iterator>
 #include <limits>
+#include <locale.h>
+#include <map>
 #include <memory>
-#include <queue>
+#include <mutex>
 #include <regex.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -23,6 +30,8 @@
 #include <string.h>
 #include <string>
 #include <string_view>
+#include <thread>
+#include <tuple>
 #include <unistd.h>
 #include <unordered_map>
 #include <unordered_set>
 using namespace std;
 using namespace std::chrono;
 
-#define dprintf(...) \
-       do { \
-               if (use_debug) { \
-                       fprintf(stderr, __VA_ARGS__); \
-               } \
-       } while (false)
-
 #define DEFAULT_DBPATH "/var/lib/mlocate/plocate.db"
 
 const char *dbpath = DEFAULT_DBPATH;
@@ -50,51 +52,12 @@ bool use_debug = false;
 bool patterns_are_regex = false;
 bool use_extended_regex = false;
 int64_t limit_matches = numeric_limits<int64_t>::max();
+int64_t limit_left = numeric_limits<int64_t>::max();
 
-class Serializer {
-public:
-       bool ready_to_print(int seq) { return next_seq == seq; }
-       void print_delayed(int seq, const vector<string> msg);
-       void release_current();
+steady_clock::time_point start;
+ZSTD_DDict *ddict = nullptr;
 
-private:
-       int next_seq = 0;
-       struct Element {
-               int seq;
-               vector<string> msg;
-
-               bool operator<(const Element &other) const
-               {
-                       return seq > other.seq;
-               }
-       };
-       priority_queue<Element> pending;
-};
-
-void Serializer::print_delayed(int seq, const vector<string> msg)
-{
-       pending.push(Element{ seq, move(msg) });
-}
-
-void Serializer::release_current()
-{
-       ++next_seq;
-
-       // See if any delayed prints can now be dealt with.
-       while (!pending.empty() && pending.top().seq == next_seq) {
-               if (limit_matches-- <= 0)
-                       return;
-               for (const string &msg : pending.top().msg) {
-                       if (print_nul) {
-                               printf("%s%c", msg.c_str(), 0);
-                       } else {
-                               printf("%s\n", msg.c_str());
-                       }
-               }
-               pending.pop();
-               ++next_seq;
-       }
-}
+regex_t compile_regex(const string &needle);
 
 struct Needle {
        enum { STRSTR,
@@ -117,27 +80,85 @@ bool matches(const Needle &needle, const char *haystack)
        }
 }
 
-bool has_access(const char *filename,
-                unordered_map<string, bool> *access_rx_cache)
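+// Caches whether all parent directories of a given file are +rx for us.
+// On a cache miss, the check can go asynchronous: a statx() submitted
+// through io_uring warms up the kernel's dentry/inode caches, so that
+// the eventual access() call is nearly always instantaneous.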
+class AccessRXCache {
+public:
+       AccessRXCache(IOUringEngine *engine)
+               : engine(engine) {}
+       void check_access(const char *filename, bool allow_async, function<void(bool)> cb);
+
+private:
+       unordered_map<string, bool> cache;
+       struct PendingStat {
+               string filename;
+               function<void(bool)> cb;
+       };
+       map<string, vector<PendingStat>> pending_stats;
+       IOUringEngine *engine;
+       mutex mu;
+};
+
+void AccessRXCache::check_access(const char *filename, bool allow_async, function<void(bool)> cb)
 {
-       const char *end = strchr(filename + 1, '/');
-       while (end != nullptr) {
-               string parent_path(filename, end);
-               auto it = access_rx_cache->find(parent_path);
-               bool ok;
-               if (it == access_rx_cache->end()) {
-                       ok = access(parent_path.c_str(), R_OK | X_OK) == 0;
-                       access_rx_cache->emplace(move(parent_path), ok);
-               } else {
-                       ok = it->second;
+       lock_guard<mutex> lock(mu);
+       if (engine == nullptr || !engine->get_supports_stat()) {
+               allow_async = false;
+       }
+
+       for (const char *end = strchr(filename + 1, '/'); end != nullptr; end = strchr(end + 1, '/')) {
+               string parent_path(filename, end - filename);  // Could be string_view, with C++20 heterogeneous lookup.
+               auto cache_it = cache.find(parent_path);
+               if (cache_it != cache.end()) {
+                       // Found in the cache.
+                       if (!cache_it->second) {
+                               cb(false);
+                               return;
+                       }
+                       continue;
                }
-               if (!ok) {
-                       return false;
+
+               if (!allow_async) {
+                       bool ok = access(parent_path.c_str(), R_OK | X_OK) == 0;
+                       cache.emplace(parent_path, ok);
+                       if (!ok) {
+                               cb(false);
+                               return;
+                       }
+                       continue;
                }
-               end = strchr(end + 1, '/');
+
+               // We want to call access(), but it could block on I/O. io_uring doesn't support
+       // access(), but we can do a dummy asynchronous statx() to populate the kernel's cache,
+               // which nearly always makes the next access() instantaneous.
+
+               // See if there's already a pending stat that matches this,
+               // or is a subdirectory.
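+               // (A pending stat on a deeper path covers us as well, since
+               // resolving e.g. /usr/share/doc touches every component on the
+               // way, warming the cache for a check on /usr, too.)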
+               auto it = pending_stats.lower_bound(parent_path);
+               if (it != pending_stats.end() && it->first.size() >= parent_path.size() &&
+                   it->first.compare(0, parent_path.size(), parent_path) == 0) {
+                       it->second.emplace_back(PendingStat{ filename, move(cb) });
+               } else {
+                       it = pending_stats.emplace(filename, vector<PendingStat>{}).first;
+                       engine->submit_stat(filename, [this, it, filename{ strdup(filename) }, cb{ move(cb) }] {
+                               // The stat returned, so now do the actual access() calls.
+                               // All of them should be in cache, so don't fire off new statx()
+                               // calls during that check.
+                               check_access(filename, /*allow_async=*/false, move(cb));
+                               free(filename);
+
+                               // Call all others that waited for the same stat() to finish.
+                               // They may fire off new stat() calls if needed.
+                               vector<PendingStat> pending = move(it->second);
+                               pending_stats.erase(it);
+                               for (PendingStat &ps : pending) {
+                                       check_access(ps.filename.c_str(), /*allow_async=*/true, move(ps.cb));
+                               }
+                       });
+               }
+               return;  // The rest will happen in async context.
        }
 
-       return true;
+       // Passed all checks.
+       cb(true);
 }
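+
+// Rough usage sketch (not from this file; `cache` is a hypothetical
+// AccessRXCache instance and print_match() a placeholder): the callback
+// may fire synchronously on a cache hit, or later from io_uring
+// completion context.
+//
+//   cache.check_access("/usr/bin/foo", /*allow_async=*/true,
+//                      [](bool ok) { if (ok) print_match(); });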
 
 class Corpus {
@@ -151,6 +172,7 @@ public:
        {
                return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
        }
+       const Header &get_hdr() const { return hdr; }
 
 public:
        const int fd;
@@ -177,10 +199,15 @@ Corpus::Corpus(int fd, IOUringEngine *engine)
                fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
                exit(1);
        }
-       if (hdr.version != 0) {
-               fprintf(stderr, "plocate.db has version %u, expected 0; please rebuild it.\n", hdr.version);
+       if (hdr.version != 0 && hdr.version != 1) {
+               fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
                exit(1);
        }
+       if (hdr.version == 0) {
+               // Version 0 predates the dictionary, so these fields contain junk; clear them.
+               hdr.zstd_dictionary_offset_bytes = 0;
+               hdr.zstd_dictionary_length_bytes = 0;
+       }
 }
 
 Corpus::~Corpus()
@@ -222,12 +249,10 @@ size_t Corpus::get_num_filename_blocks() const
        return hdr.num_docids;
 }
 
-uint64_t scan_file_block(const vector<Needle> &needles, string_view compressed,
-                         unordered_map<string, bool> *access_rx_cache, int seq,
-                         Serializer *serializer)
+void scan_file_block(const vector<Needle> &needles, string_view compressed,
+                     AccessRXCache *access_rx_cache, uint64_t seq, ResultReceiver *serializer,
+                     atomic<uint64_t> *matched)
 {
-       uint64_t matched = 0;
-
        unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
        if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
                fprintf(stderr, "ZSTD_getFrameContentSize() failed\n");
@@ -237,17 +262,39 @@ uint64_t scan_file_block(const vector<Needle> &needles, string_view compressed,
        string block;
        block.resize(uncompressed_len + 1);
 
-       size_t err = ZSTD_decompress(&block[0], block.size(), compressed.data(),
-                                    compressed.size());
+       static thread_local ZSTD_DCtx *ctx = ZSTD_createDCtx();  // Reused across calls.
+       size_t err;
+
+       if (ddict != nullptr) {
+               err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
+                                                compressed.size(), ddict);
+       } else {
+               err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
+                                         compressed.size());
+       }
        if (ZSTD_isError(err)) {
                fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
                exit(1);
        }
        block[block.size() - 1] = '\0';
 
-       bool immediate_print = (serializer == nullptr || serializer->ready_to_print(seq));
-       vector<string> delayed;
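+       // The Serializer (see serializer.h) takes (seq, skip, msg) triples:
+       // print msg at position seq, then advance the expected sequence
+       // number by skip. An empty msg means no output, just advance.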
+       auto test_candidate = [&](const char *filename, uint64_t local_seq, uint64_t next_seq) {
+               access_rx_cache->check_access(filename, /*allow_async=*/true, [matched, serializer, local_seq, next_seq, filename{ strdup(filename) }](bool ok) {
+                       if (ok) {
+                               ++*matched;
+                               serializer->print(local_seq, next_seq - local_seq, filename);
+                       } else {
+                               serializer->print(local_seq, next_seq - local_seq, "");
+                       }
+                       free(filename);
+               });
+       };
+
+       // We need to know the next sequence number before inserting into Serializer,
+       // so always buffer one candidate.
+       const char *pending_candidate = nullptr;
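+       // Sequence numbers are (block seq << 32) + index of the candidate
+       // within the block, so output from blocks scanned in parallel
+       // interleaves in the right order.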
 
+       uint64_t local_seq = seq << 32;
        for (const char *filename = block.data();
             filename != block.data() + block.size();
             filename += strlen(filename) + 1) {
@@ -258,59 +305,154 @@ uint64_t scan_file_block(const vector<Needle> &needles, string_view compressed,
                                break;
                        }
                }
-               if (found && has_access(filename, access_rx_cache)) {
-                       if (limit_matches-- <= 0)
-                               break;
-                       ++matched;
-                       if (only_count)
-                               continue;
-                       if (immediate_print) {
-                               if (print_nul) {
-                                       printf("%s%c", filename, 0);
-                               } else {
-                                       printf("%s\n", filename);
-                               }
-                       } else {
-                               delayed.push_back(filename);
+               if (found) {
+                       if (pending_candidate != nullptr) {
+                               test_candidate(pending_candidate, local_seq, local_seq + 1);
+                               ++local_seq;
                        }
+                       pending_candidate = filename;
                }
        }
-       if (serializer != nullptr && !only_count) {
-               if (immediate_print) {
-                       serializer->release_current();
-               } else {
-                       serializer->print_delayed(seq, move(delayed));
-               }
+       if (pending_candidate == nullptr) {
+               serializer->print(seq << 32, 1ULL << 32, "");
+       } else {
+               test_candidate(pending_candidate, local_seq, (seq + 1) << 32);
        }
-       return matched;
 }
 
 size_t scan_docids(const vector<Needle> &needles, const vector<uint32_t> &docids, const Corpus &corpus, IOUringEngine *engine)
 {
        Serializer docids_in_order;
-       unordered_map<string, bool> access_rx_cache;
-       uint64_t matched = 0;
+       AccessRXCache access_rx_cache(engine);
+       atomic<uint64_t> matched{ 0 };
        for (size_t i = 0; i < docids.size(); ++i) {
                uint32_t docid = docids[i];
                corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, &docids_in_order](string_view compressed) {
-                       matched += scan_file_block(needles, compressed, &access_rx_cache, i, &docids_in_order);
+                       scan_file_block(needles, compressed, &access_rx_cache, i, &docids_in_order, &matched);
                });
        }
        engine->finish();
        return matched;
 }
 
+struct WorkerThread {
+       thread t;
+
+       // We use a per-thread result queue instead of locking the Serializer
+       // directly, since a single lock on it becomes a huge choke point
+       // when there are many threads.
+       mutex result_mu;
+       struct Result {
+               uint64_t seq;
+               uint64_t skip;
+               string msg;
+       };
+       vector<Result> results;
+};
+
+class WorkerThreadReceiver : public ResultReceiver {
+public:
+       WorkerThreadReceiver(WorkerThread *wt)
+               : wt(wt) {}
+
+       void print(uint64_t seq, uint64_t skip, const string msg) override
+       {
+               lock_guard<mutex> lock(wt->result_mu);
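+               // Merge runs of non-matches into a single skip, so the result
+               // queue stays small when only few files match.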
+               if (msg.empty() && !wt->results.empty() && wt->results.back().seq + wt->results.back().skip == seq) {
+                       wt->results.back().skip += skip;
+               } else {
+                       wt->results.emplace_back(WorkerThread::Result{ seq, skip, move(msg) });
+               }
+       }
+
+private:
+       WorkerThread *wt;
+};
+
+void deliver_results(WorkerThread *wt, Serializer *serializer)
+{
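+       // Move the batch out while holding the worker's lock, but do the
+       // actual printing through the Serializer outside of it.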
+       vector<WorkerThread::Result> results;
+       {
+               lock_guard<mutex> lock(wt->result_mu);
+               results = move(wt->results);
+       }
+       for (const WorkerThread::Result &result : results) {
+               serializer->print(result.seq, result.skip, move(result.msg));
+       }
+}
+
 // We do this sequentially, as it's faster than scattering
 // a lot of I/O through io_uring and hoping the kernel will
-// coalesce it plus readahead for us.
-uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus, IOUringEngine *engine)
+// coalesce it plus readahead for us. Since we assume that
+// we will primarily be CPU-bound, we fire up one worker
+// thread for each spare core; the remaining core is left
+// to the main thread, which only does I/O. access() is
+// still synchronous.
+uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
 {
-       unordered_map<string, bool> access_rx_cache;
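+       // There's no io_uring engine in the linear scan, so load the
+       // decompression dictionary (if the database has one) synchronously
+       // up front.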
+       {
+               const Header &hdr = corpus.get_hdr();
+               if (hdr.zstd_dictionary_length_bytes > 0) {
+                       string dictionary;
+                       dictionary.resize(hdr.zstd_dictionary_length_bytes);
+                       complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
+                       ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
+               }
+       }
+
+       AccessRXCache access_rx_cache(nullptr);
+       Serializer serializer;
        uint32_t num_blocks = corpus.get_num_filename_blocks();
        unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
        complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
+       atomic<uint64_t> matched{ 0 };
+
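+       // Simple bounded producer/consumer setup: the main thread preads
+       // compressed spans and pushes them onto work_queue; the worker
+       // threads pop spans off, then decompress and scan them.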
+       mutex mu;
+       condition_variable queue_added, queue_removed;
+       deque<tuple<int, int, string>> work_queue;  // Under mu.
+       bool done = false;  // Under mu.
+
+       unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
+       dprintf("Using %u worker threads for linear scan.\n", num_threads);
+       unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
+       for (unsigned i = 0; i < num_threads; ++i) {
+               threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
+                       // regexec() takes a lock on the compiled regex, so each thread needs its own copy.
+                       const vector<Needle> *use_needles = &needles;
+                       vector<Needle> recompiled_needles;
+                       if (i != 0 && patterns_are_regex) {
+                               recompiled_needles = needles;
+                               for (Needle &needle : recompiled_needles) {
+                                       needle.re = compile_regex(needle.str);
+                               }
+                               use_needles = &recompiled_needles;
+                       }
+
+                       WorkerThreadReceiver receiver(&threads[i]);
+                       for (;;) {
+                               uint32_t io_docid, last_docid;
+                               string compressed;
+
+                               {
+                                       unique_lock<mutex> lock(mu);
+                                       queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
+                                       if (done && work_queue.empty()) {
+                                               return;
+                                       }
+                                       tie(io_docid, last_docid, compressed) = move(work_queue.front());
+                                       work_queue.pop_front();
+                                       queue_removed.notify_all();
+                               }
+
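+                               // Each queue entry covers up to 32 consecutive blocks
+                               // read in a single pread(); scan each block's zstd
+                               // frame separately.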
+                               for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
+                                       size_t relative_offset = offsets[docid] - offsets[io_docid];
+                                       size_t len = offsets[docid + 1] - offsets[docid];
+                                       scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
+                               }
+                       }
+               });
+       }
+
        string compressed;
-       uint64_t matched = 0;
        for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
                uint32_t last_docid = std::min(io_docid + 32, num_blocks);
                size_t io_len = offsets[last_docid] - offsets[io_docid];
@@ -319,13 +461,27 @@ uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &co
                }
                complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);
 
-               for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
-                       size_t relative_offset = offsets[docid] - offsets[io_docid];
-                       size_t len = offsets[docid + 1] - offsets[docid];
-                       matched += scan_file_block(needles, { &compressed[relative_offset], len }, &access_rx_cache, 0, nullptr);
-                       if (limit_matches <= 0)
-                               return matched;
+               {
+                       unique_lock<mutex> lock(mu);
+                       queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; });  // Allow ~2MB of data queued up.
+                       work_queue.emplace_back(io_docid, last_docid, move(compressed));
+                       queue_added.notify_one();  // Avoid the thundering herd.
                }
+
+               // Pick up some results, so that the workers' result queues
+               // don't grow without bound. (Draining all the queues on every
+               // iteration is slow with many threads; draining just one is fine.)
+               unsigned i = io_docid / 32;
+               deliver_results(&threads[i % num_threads], &serializer);
+       }
+       {
+               lock_guard<mutex> lock(mu);
+               done = true;
+               queue_added.notify_all();
+       }
+       for (unsigned i = 0; i < num_threads; ++i) {
+               threads[i].t.join();
+               deliver_results(&threads[i], &serializer);
        }
        return matched;
 }
@@ -391,7 +547,7 @@ void do_search_file(const vector<Needle> &needles, const char *filename)
                exit(EXIT_FAILURE);
        }
 
-       steady_clock::time_point start __attribute__((unused)) = steady_clock::now();
+       start = steady_clock::now();
        if (access("/", R_OK | X_OK)) {
                // We can't find anything, no need to bother...
                return;
@@ -436,13 +592,28 @@ void do_search_file(const vector<Needle> &needles, const char *filename)
                // (We could have searched through all trigrams that matched
                // the pattern and done a union of them, but that's a lot of
                // work for fairly unclear gain.)
-               uint64_t matched = scan_all_docids(needles, fd, corpus, &engine);
+               uint64_t matched = scan_all_docids(needles, fd, corpus);
                if (only_count) {
-                       printf("%zu\n", matched);
+                       printf("%" PRIu64 "\n", matched);
                }
                return;
        }
 
+       // Sneak in fetching the dictionary, if present. It's not necessarily clear
+       // exactly where it would be cheapest to get it, but it needs to be present
+       // before we can decode any of the posting lists. Most likely, it's
+       // in the same filesystem block as the header anyway, so it should be
+       // present in the cache.
+       {
+               const Header &hdr = corpus.get_hdr();
+               if (hdr.zstd_dictionary_length_bytes > 0) {
+                       engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
+                               ddict = ZSTD_createDDict(s.data(), s.size());
+                               dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
+                       });
+               }
+       }
+
        // Look them all up on disk.
        for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
                corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }](const Trigram *trgmptr, size_t len) {
@@ -558,11 +729,11 @@ void do_search_file(const vector<Needle> &needles, const char *filename)
                1e3 * duration<float>(steady_clock::now() - start).count());
 
        uint64_t matched = scan_docids(needles, cur_candidates, corpus, &engine);
-       dprintf("Done in %.1f ms, found %zu matches.\n",
+       dprintf("Done in %.1f ms, found %" PRIu64 " matches.\n",
                1e3 * duration<float>(steady_clock::now() - start).count(), matched);
 
        if (only_count) {
-               printf("%zu\n", matched);
+               printf("%" PRIu64 "\n", matched);
        }
 }
 
@@ -667,7 +838,7 @@ int main(int argc, char **argv)
                        break;
                case 'l':
                case 'n':
-                       limit_matches = atoll(optarg);
+                       limit_matches = limit_left = atoll(optarg);
                        if (limit_matches <= 0) {
                                fprintf(stderr, "Error: limit must be a strictly positive number.\n");
                                exit(1);