+#include "access_rx_cache.h"
+#include "complete_pread.h"
#include "db.h"
#include "dprintf.h"
#include "io_uring_engine.h"
+#include "needle.h"
#include "parse_trigrams.h"
+#include "serializer.h"
#include "turbopfor.h"
#include "unique_sort.h"
#include <algorithm>
#include <assert.h>
+#include <atomic>
#include <chrono>
+#include <condition_variable>
+#include <deque>
#include <fcntl.h>
-#include <fnmatch.h>
#include <functional>
#include <getopt.h>
#include <inttypes.h>
-#include <iosfwd>
#include <iterator>
#include <limits>
-#include <map>
+#include <locale.h>
#include <memory>
-#include <queue>
+#include <mutex>
#include <regex.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include <string_view>
+#include <thread>
+#include <tuple>
#include <unistd.h>
#include <unordered_map>
#include <unordered_set>
using namespace std;
using namespace std::chrono;
-#define DEFAULT_DBPATH "/var/lib/mlocate/plocate.db"
-
-const char *dbpath = DEFAULT_DBPATH;
+const char *dbpath = DBFILE;
bool ignore_case = false;
bool only_count = false;
bool print_nul = false;
bool use_debug = false;
+bool flush_cache = false;
bool patterns_are_regex = false;
bool use_extended_regex = false;
+bool match_basename = false;
int64_t limit_matches = numeric_limits<int64_t>::max();
int64_t limit_left = numeric_limits<int64_t>::max();
+bool stdout_is_tty = false;
steady_clock::time_point start;
ZSTD_DDict *ddict = nullptr;
-void apply_limit()
-{
- if (--limit_left > 0) {
- return;
- }
- dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
- 1e3 * duration<float>(steady_clock::now() - start).count(), limit_matches);
- if (only_count) {
- printf("%" PRId64 "\n", limit_matches);
- }
- exit(0);
-}
-
-class Serializer {
-public:
- ~Serializer() { assert(limit_left <= 0 || pending.empty()); }
- void print(uint64_t seq, uint64_t skip, const string msg);
-
-private:
- uint64_t next_seq = 0;
- struct Element {
- uint64_t seq, skip;
- string msg;
-
- bool operator<(const Element &other) const
- {
- return seq > other.seq;
- }
- };
- priority_queue<Element> pending;
-};
-
-void Serializer::print(uint64_t seq, uint64_t skip, const string msg)
-{
- if (only_count) {
- if (!msg.empty()) {
- apply_limit();
- }
- return;
- }
-
- if (next_seq != seq) {
- pending.push(Element{ seq, skip, move(msg) });
- return;
- }
-
- if (!msg.empty()) {
- if (print_nul) {
- printf("%s%c", msg.c_str(), 0);
- } else {
- printf("%s\n", msg.c_str());
- }
- apply_limit();
- }
- next_seq += skip;
-
- // See if any delayed prints can now be dealt with.
- while (!pending.empty() && pending.top().seq == next_seq) {
- if (!pending.top().msg.empty()) {
- if (print_nul) {
- printf("%s%c", pending.top().msg.c_str(), 0);
- } else {
- printf("%s\n", pending.top().msg.c_str());
- }
- apply_limit();
- }
- next_seq += pending.top().skip;
- pending.pop();
- }
-}
-
-struct Needle {
- enum { STRSTR,
- REGEX,
- GLOB } type;
- string str; // Filled in no matter what.
- regex_t re; // For REGEX.
-};
-
-bool matches(const Needle &needle, const char *haystack)
-{
- if (needle.type == Needle::STRSTR) {
- return strstr(haystack, needle.str.c_str()) != nullptr;
- } else if (needle.type == Needle::GLOB) {
- int flags = ignore_case ? FNM_CASEFOLD : 0;
- return fnmatch(needle.str.c_str(), haystack, flags) == 0;
- } else {
- assert(needle.type == Needle::REGEX);
- return regexec(&needle.re, haystack, /*nmatch=*/0, /*pmatch=*/nullptr, /*flags=*/0) == 0;
- }
-}
-
-class AccessRXCache {
-public:
- AccessRXCache(IOUringEngine *engine)
- : engine(engine) {}
- void check_access(const char *filename, bool allow_async, function<void(bool)> cb);
-
-private:
- unordered_map<string, bool> cache;
- struct PendingStat {
- string filename;
- function<void(bool)> cb;
- };
- map<string, vector<PendingStat>> pending_stats;
- IOUringEngine *engine;
-};
-
-void AccessRXCache::check_access(const char *filename, bool allow_async, function<void(bool)> cb)
-{
- if (engine == nullptr || !engine->get_supports_stat()) {
- allow_async = false;
- }
-
- for (const char *end = strchr(filename + 1, '/'); end != nullptr; end = strchr(end + 1, '/')) {
- string parent_path(filename, end - filename); // string_view from C++20.
- auto cache_it = cache.find(parent_path);
- if (cache_it != cache.end()) {
- // Found in the cache.
- if (!cache_it->second) {
- cb(false);
- return;
- }
- continue;
- }
-
- if (!allow_async) {
- bool ok = access(parent_path.c_str(), R_OK | X_OK) == 0;
- cache.emplace(parent_path, ok);
- if (!ok) {
- cb(false);
- return;
- }
- continue;
- }
-
- // We want to call access(), but it could block on I/O. io_uring doesn't support
- // access(), but we can do a dummy asynchonous statx() to populate the kernel's cache,
- // which nearly always makes the next access() instantaneous.
-
- // See if there's already a pending stat that matches this,
- // or is a subdirectory.
- auto it = pending_stats.lower_bound(parent_path);
- if (it != pending_stats.end() && it->first.size() >= parent_path.size() &&
- it->first.compare(0, parent_path.size(), parent_path) == 0) {
- it->second.emplace_back(PendingStat{ filename, move(cb) });
- } else {
- it = pending_stats.emplace(filename, vector<PendingStat>{}).first;
- engine->submit_stat(filename, [this, it, filename{ strdup(filename) }, cb{ move(cb) }] {
- // The stat returned, so now do the actual access() calls.
- // All of them should be in cache, so don't fire off new statx()
- // calls during that check.
- check_access(filename, /*allow_async=*/false, move(cb));
- free(filename);
-
- // Call all others that waited for the same stat() to finish.
- // They may fire off new stat() calls if needed.
- vector<PendingStat> pending = move(it->second);
- pending_stats.erase(it);
- for (PendingStat &ps : pending) {
- check_access(ps.filename.c_str(), /*allow_async=*/true, move(ps.cb));
- }
- });
- }
- return; // The rest will happen in async context.
- }
-
- // Passed all checks.
- cb(true);
-}
-
class Corpus {
public:
Corpus(int fd, IOUringEngine *engine);
Corpus::Corpus(int fd, IOUringEngine *engine)
: fd(fd), engine(engine)
{
- // Enable to test cold-cache behavior (except for access()).
- if (true) {
+ if (flush_cache) {
off_t len = lseek(fd, 0, SEEK_END);
if (len == -1) {
perror("lseek");
hdr.zstd_dictionary_offset_bytes = 0;
hdr.zstd_dictionary_length_bytes = 0;
}
+ if (hdr.max_version < 2) {
+ // This too. (We ignore the other max_version 2 fields.)
+ hdr.check_visibility = true;
+ }
}
Corpus::~Corpus()
}
void scan_file_block(const vector<Needle> &needles, string_view compressed,
- AccessRXCache *access_rx_cache, uint64_t seq, Serializer *serializer,
- uint64_t *matched)
+ AccessRXCache *access_rx_cache, uint64_t seq, ResultReceiver *serializer,
+ atomic<uint64_t> *matched)
{
unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
string block;
block.resize(uncompressed_len + 1);
- static ZSTD_DCtx *ctx = ZSTD_createDCtx(); // Reused across calls.
+ static thread_local ZSTD_DCtx *ctx = ZSTD_createDCtx(); // Reused across calls.
size_t err;
if (ddict != nullptr) {
for (const char *filename = block.data();
filename != block.data() + block.size();
filename += strlen(filename) + 1) {
+ const char *haystack = filename;
+ if (match_basename) {
+ haystack = strrchr(filename, '/');
+ if (haystack == nullptr) {
+ haystack = filename;
+ } else {
+ ++haystack;
+ }
+ }
+
bool found = true;
for (const Needle &needle : needles) {
- if (!matches(needle, filename)) {
+ if (!matches(needle, haystack)) {
found = false;
break;
}
size_t scan_docids(const vector<Needle> &needles, const vector<uint32_t> &docids, const Corpus &corpus, IOUringEngine *engine)
{
Serializer docids_in_order;
- AccessRXCache access_rx_cache(engine);
- uint64_t matched = 0;
+ AccessRXCache access_rx_cache(engine, corpus.get_hdr().check_visibility);
+ atomic<uint64_t> matched{ 0 };
for (size_t i = 0; i < docids.size(); ++i) {
uint32_t docid = docids[i];
corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, &docids_in_order](string_view compressed) {
return matched;
}
+// Per-thread state for the linear-scan worker pool. The main thread
+// pushes work onto a shared queue; each worker buffers its output
+// here until the main thread drains it with deliver_results().
+struct WorkerThread {
+	thread t;
+
+	// We use a result queue instead of synchronizing Serializer,
+	// since a lock on it becomes a huge choke point if there are
+	// lots of threads.
+	mutex result_mu;
+	struct Result {
+		uint64_t seq;
+		uint64_t skip;
+		string msg;
+	};
+	vector<Result> results;  // Protected by result_mu.
+};
+
+// Adapts the ResultReceiver interface so that scan_file_block(),
+// running on a worker thread, appends its matches to that thread's
+// result buffer instead of printing them directly.
+class WorkerThreadReceiver : public ResultReceiver {
+public:
+	WorkerThreadReceiver(WorkerThread *wt)
+		: wt(wt) {}
+
+	void print(uint64_t seq, uint64_t skip, const string msg) override
+	{
+		lock_guard<mutex> lock(wt->result_mu);
+		// An empty msg only advances the sequence number; coalesce runs
+		// of them into the previous entry's skip to keep the buffer small.
+		if (msg.empty() && !wt->results.empty() && wt->results.back().seq + wt->results.back().skip == seq) {
+			wt->results.back().skip += skip;
+		} else {
+			wt->results.emplace_back(WorkerThread::Result{ seq, skip, move(msg) });
+		}
+	}
+
+private:
+	WorkerThread *wt;  // Not owned; outlives this receiver.
+};
+
+// Steals any buffered results from the given worker thread and hands
+// them to the serializer for in-order printing. Called from the main
+// thread, both during the scan and after each worker has been joined.
+void deliver_results(WorkerThread *wt, Serializer *serializer)
+{
+	vector<WorkerThread::Result> results;
+	{
+		// Hold the lock only long enough to swap out the buffer,
+		// so the worker is not blocked while we print.
+		lock_guard<mutex> lock(wt->result_mu);
+		results = move(wt->results);
+	}
+	for (const WorkerThread::Result &result : results) {
+		// NOTE(review): result is a const ref, so move() here degrades
+		// to a copy of msg — harmless, but the move is a no-op.
+		serializer->print(result.seq, result.skip, move(result.msg));
+	}
+}
+
// We do this sequentially, as it's faster than scattering
// a lot of I/O through io_uring and hoping the kernel will
-// coalesce it plus readahead for us.
+// coalesce it plus readahead for us. Since we assume that
+// we will primarily be CPU-bound, we'll be firing up one
+// worker thread for each spare core (the last one will
+// only be doing I/O). access() is still synchronous.
uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
{
{
}
}
- AccessRXCache access_rx_cache(nullptr);
- Serializer serializer; // Mostly dummy; handles only the limit.
+ AccessRXCache access_rx_cache(nullptr, corpus.get_hdr().check_visibility);
+ Serializer serializer;
uint32_t num_blocks = corpus.get_num_filename_blocks();
unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
+ atomic<uint64_t> matched{ 0 };
+
+ mutex mu;
+ condition_variable queue_added, queue_removed;
+ deque<tuple<int, int, string>> work_queue; // Under mu.
+ bool done = false; // Under mu.
+
+ unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
+ dprintf("Using %u worker threads for linear scan.\n", num_threads);
+ unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
+ for (unsigned i = 0; i < num_threads; ++i) {
+ threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
+ // regcomp() takes a lock on the regex, so each thread will need its own.
+ const vector<Needle> *use_needles = &needles;
+ vector<Needle> recompiled_needles;
+ if (i != 0 && patterns_are_regex) {
+ recompiled_needles = needles;
+ for (Needle &needle : recompiled_needles) {
+ needle.re = compile_regex(needle.str);
+ }
+ use_needles = &recompiled_needles;
+ }
+
+ WorkerThreadReceiver receiver(&threads[i]);
+ for (;;) {
+ uint32_t io_docid, last_docid;
+ string compressed;
+
+ {
+ unique_lock<mutex> lock(mu);
+ queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
+ if (done && work_queue.empty()) {
+ return;
+ }
+ tie(io_docid, last_docid, compressed) = move(work_queue.front());
+ work_queue.pop_front();
+ queue_removed.notify_all();
+ }
+
+ for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
+ size_t relative_offset = offsets[docid] - offsets[io_docid];
+ size_t len = offsets[docid + 1] - offsets[docid];
+ scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
+ }
+ }
+ });
+ }
+
string compressed;
- uint64_t matched = 0;
for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
uint32_t last_docid = std::min(io_docid + 32, num_blocks);
size_t io_len = offsets[last_docid] - offsets[io_docid];
}
complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);
- for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
- size_t relative_offset = offsets[docid] - offsets[io_docid];
- size_t len = offsets[docid + 1] - offsets[docid];
- scan_file_block(needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &serializer, &matched);
+ {
+ unique_lock<mutex> lock(mu);
+ queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; }); // Allow ~2MB of data queued up.
+ work_queue.emplace_back(io_docid, last_docid, move(compressed));
+ queue_added.notify_one(); // Avoid the thundering herd.
}
+
+ // Pick up some results, so that we are sure that we won't just overload.
+ // (Seemingly, going through all of these causes slowness with many threads,
+ // but taking only one is OK.)
+ unsigned i = io_docid / 32;
+ deliver_results(&threads[i % num_threads], &serializer);
+ }
+ {
+ lock_guard<mutex> lock(mu);
+ done = true;
+ queue_added.notify_all();
+ }
+ for (unsigned i = 0; i < num_threads; ++i) {
+ threads[i].t.join();
+ deliver_results(&threads[i], &serializer);
}
return matched;
}
// the pattern and done a union of them, but that's a lot of
// work for fairly unclear gain.)
uint64_t matched = scan_all_docids(needles, fd, corpus);
+ dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
+ 1e3 * duration<float>(steady_clock::now() - start).count(), matched);
if (only_count) {
printf("%" PRId64 "\n", matched);
}
if (done)
return;
- uint32_t trgm __attribute__((unused)) = trgmptr.trgm;
+ uint32_t trgm = trgmptr.trgm;
const unsigned char *pldata = reinterpret_cast<const unsigned char *>(s.data());
size_t num = trgmptr.num_docids;
decoded.resize(num);
}
}
-string unescape_glob_to_plain_string(const string &needle)
-{
- string unescaped;
- for (size_t i = 0; i < needle.size(); i += read_unigram(needle, i).second) {
- uint32_t ch = read_unigram(needle, i).first;
- assert(ch != WILDCARD_UNIGRAM);
- if (ch == PREMATURE_END_UNIGRAM) {
- fprintf(stderr, "Pattern '%s' ended prematurely\n", needle.c_str());
- exit(1);
- }
- unescaped.push_back(ch);
- }
- return unescaped;
-}
-
-regex_t compile_regex(const string &needle)
-{
- regex_t re;
- int flags = REG_NOSUB;
- if (ignore_case) {
- flags |= REG_ICASE;
- }
- if (use_extended_regex) {
- flags |= REG_EXTENDED;
- }
- int err = regcomp(&re, needle.c_str(), flags);
- if (err != 0) {
- char errbuf[256];
- regerror(err, &re, errbuf, sizeof(errbuf));
- fprintf(stderr, "Error when compiling regex '%s': %s\n", needle.c_str(), errbuf);
- exit(1);
- }
- return re;
-}
-
void usage()
{
printf(
"Usage: plocate [OPTION]... PATTERN...\n"
"\n"
+ " -b, --basename search only the file name portion of path names\n"
" -c, --count print number of matches instead of the matches\n"
" -d, --database DBPATH search for files in DBPATH\n"
- " (default is " DEFAULT_DBPATH ")\n"
+ " (default is " DBFILE ")\n"
" -i, --ignore-case search case-insensitively\n"
" -l, --limit LIMIT stop after LIMIT matches\n"
" -0, --null delimit matches by NUL instead of newline\n"
" -r, --regexp interpret patterns as basic regexps (slow)\n"
" --regex interpret patterns as extended regexps (slow)\n"
+ " -w, --wholename search the entire path name (default; see -b)\n"
" --help print this help\n"
" --version print version information\n");
}
void version()
{
- printf("plocate %s\n", PLOCATE_VERSION);
+ printf("%s %s\n", PACKAGE_NAME, PACKAGE_VERSION);
printf("Copyright 2020 Steinar H. Gunderson\n");
printf("License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl.html>.\n");
printf("This is free software: you are free to change and redistribute it.\n");
int main(int argc, char **argv)
{
constexpr int EXTENDED_REGEX = 1000;
+ constexpr int FLUSH_CACHE = 1001;
static const struct option long_options[] = {
{ "help", no_argument, 0, 'h' },
{ "count", no_argument, 0, 'c' },
+ { "basename", no_argument, 0, 'b' },
{ "database", required_argument, 0, 'd' },
{ "ignore-case", no_argument, 0, 'i' },
{ "limit", required_argument, 0, 'l' },
{ "version", no_argument, 0, 'V' },
{ "regexp", no_argument, 0, 'r' },
{ "regex", no_argument, 0, EXTENDED_REGEX },
+ { "wholename", no_argument, 0, 'w' },
{ "debug", no_argument, 0, 'D' }, // Not documented.
+ // Enable to test cold-cache behavior (except for access()). Not documented.
+ { "flush-cache", no_argument, 0, FLUSH_CACHE },
{ 0, 0, 0, 0 }
};
setlocale(LC_ALL, "");
for (;;) {
int option_index = 0;
- int c = getopt_long(argc, argv, "cd:hil:n:0VD", long_options, &option_index);
+ int c = getopt_long(argc, argv, "bcd:hil:n:0rwVD", long_options, &option_index);
if (c == -1) {
break;
}
switch (c) {
+ case 'b':
+ match_basename = true;
+ break;
case 'c':
only_count = true;
break;
patterns_are_regex = true;
use_extended_regex = true;
break;
+ case 'w':
+ match_basename = false; // No-op unless -b is given first.
+ break;
case 'D':
use_debug = true;
break;
+ case FLUSH_CACHE:
+ flush_cache = true;
+ break;
case 'V':
version();
break;
}
}
- if (use_debug) {
+ if (use_debug || flush_cache) {
// Debug information would leak information about which files exist,
// so drop setgid before we open the file; one would either need to run
- // as root, or use a locally-built file.
+ // as root, or use a locally-built file. Doing the same thing for
+ // flush_cache is mostly paranoia, in an attempt to prevent random users
+ // from making plocate slow for everyone else.
if (setgid(getgid()) != 0) {
perror("setgid");
exit(EXIT_FAILURE);
}
}
+ if (!print_nul) {
+ stdout_is_tty = isatty(1);
+ }
+
vector<Needle> needles;
for (int i = optind; i < argc; ++i) {
Needle needle;