#include "access_rx_cache.h"
#include "complete_pread.h"
#include "db.h"
#include "dprintf.h"
#include "io_uring_engine.h"
#include "needle.h"
#include "parse_trigrams.h"
#include "serializer.h"
#include "turbopfor.h"
#include "unique_sort.h"

#include <algorithm>
#include <assert.h>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <deque>
#include <fcntl.h>
#include <functional>
#include <getopt.h>
#include <inttypes.h>
#include <iterator>
#include <limits>
#include <locale.h>
#include <memory>
#include <mutex>
#include <regex.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <string_view>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <thread>
#include <tuple>
#include <unistd.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <zstd.h>

using namespace std;
using namespace std::chrono;

bool ignore_case = false;
bool only_count = false;
bool print_nul = false;
bool use_debug = false;
bool flush_cache = false;
bool patterns_are_regex = false;
bool use_extended_regex = false;
bool match_basename = false;
bool check_existence = false;
int64_t limit_matches = numeric_limits<int64_t>::max();
int64_t limit_left = numeric_limits<int64_t>::max();
bool stdout_is_tty = false;
static bool in_forked_child = false;

steady_clock::time_point start;
ZSTD_DDict *ddict = nullptr;

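// Wraps an open plocate.db database: validates the header on construction,
// and provides (mostly asynchronous) accessors for the trigram hash table
// and the compressed filename blocks.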
class Corpus {
public:
        Corpus(int fd, IOUringEngine *engine);
        ~Corpus();
        void find_trigram(uint32_t trgm, function<void(const Trigram *trgmptr, size_t len)> cb);
        void get_compressed_filename_block(uint32_t docid, function<void(string_view)> cb) const;
        size_t get_num_filename_blocks() const;
        off_t offset_for_block(uint32_t docid) const
        {
                return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
        }
        const Header &get_hdr() const { return hdr; }

public:
        const int fd;
        IOUringEngine *const engine;

        Header hdr;
};

Corpus::Corpus(int fd, IOUringEngine *engine)
        : fd(fd), engine(engine)
{
        if (flush_cache) {
                off_t len = lseek(fd, 0, SEEK_END);
                if (len == -1) {
                        perror("lseek");
                        exit(1);
                }
                posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);
        }

        complete_pread(fd, &hdr, sizeof(hdr), /*offset=*/0);
        if (memcmp(hdr.magic, "\0plocate", 8) != 0) {
                fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
                exit(1);
        }
        if (hdr.version != 0 && hdr.version != 1) {
                fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
                exit(1);
        }
        if (hdr.version == 0) {
                // These will be junk data.
                hdr.zstd_dictionary_offset_bytes = 0;
                hdr.zstd_dictionary_length_bytes = 0;
        }
        if (hdr.max_version < 2) {
                // This too. (We ignore the other max_version 2 fields.)
                hdr.check_visibility = true;
        }
}

Corpus::~Corpus()
{
        close(fd);
}

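// Look up a trigram in the on-disk hash table. We read the home bucket plus
// all of its possible overflow slots (and one extra entry) in a single
// asynchronous read, so that the length of the matching posting list can be
// computed from the offset of the entry that follows it.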
void Corpus::find_trigram(uint32_t trgm, function<void(const Trigram *trgmptr, size_t len)> cb)
{
        uint32_t bucket = hash_trigram(trgm, hdr.hashtable_size);
        engine->submit_read(fd, sizeof(Trigram) * (hdr.extra_ht_slots + 2), hdr.hash_table_offset_bytes + sizeof(Trigram) * bucket, [this, trgm, cb{ move(cb) }](string_view s) {
                const Trigram *trgmptr = reinterpret_cast<const Trigram *>(s.data());
                for (unsigned i = 0; i < hdr.extra_ht_slots + 1; ++i) {
                        if (trgmptr[i].trgm == trgm) {
                                cb(trgmptr + i, trgmptr[i + 1].offset - trgmptr[i].offset);
                                return;
                        }
                }

                // Not found.
                cb(nullptr, 0);
        });
}

void Corpus::get_compressed_filename_block(uint32_t docid, function<void(string_view)> cb) const
{
        // Read the file offset from this docid and the next one.
        // This is always allowed, since we have a sentinel block at the end.
        engine->submit_read(fd, sizeof(uint64_t) * 2, offset_for_block(docid), [this, cb{ move(cb) }](string_view s) {
                const uint64_t *ptr = reinterpret_cast<const uint64_t *>(s.data());
                off_t offset = ptr[0];
                size_t len = ptr[1] - ptr[0];
                engine->submit_read(fd, len, offset, cb);
        });
}

size_t Corpus::get_num_filename_blocks() const
{
        return hdr.num_docids;
}

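// Decide whether a candidate match should be reported: if -e/--existing is
// given (and the path is visible), verify that it still exists, using an
// asynchronous stat through the io_uring engine when supported and a
// synchronous lstat() otherwise.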
template<class T>
void stat_if_needed(const char *filename, bool access_ok, IOUringEngine *engine, T cb)
{
        if (!access_ok || !check_existence) {
                // The user doesn't have access, or we don't care about existence,
                // so no need to stat.
                cb(access_ok);
        } else if (engine == nullptr || !engine->get_supports_stat()) {
                // Do a synchronous stat.
                struct stat buf;
                bool ok = lstat(filename, &buf) == 0;
                cb(ok);
        } else {
                engine->submit_stat(filename, cb);
        }
}

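// Decompress a filename block and scan it for the given needles. Sequence
// numbers are (block seq << 32) + index within the block, so the serializer
// can reassemble output in block order even though blocks (and the
// asynchronous access/stat checks) complete out of order; the skip value
// passed to print() says how many sequence numbers each entry consumes.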
void scan_file_block(const vector<Needle> &needles, string_view compressed,
                     IOUringEngine *engine, AccessRXCache *access_rx_cache, uint64_t seq, ResultReceiver *serializer,
                     atomic<uint64_t> *matched)
{
        unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
        if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
                fprintf(stderr, "ZSTD_getFrameContentSize() failed\n");
                exit(1);
        }

        string block;
        block.resize(uncompressed_len + 1);

        static thread_local ZSTD_DCtx *ctx = ZSTD_createDCtx();  // Reused across calls.
        size_t err;

        if (ddict != nullptr) {
                err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
                                                 compressed.size(), ddict);
        } else {
                err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
                                          compressed.size());
        }
        if (ZSTD_isError(err)) {
                fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
                exit(1);
        }
        block[block.size() - 1] = '\0';

        auto test_candidate = [&](const char *filename, uint64_t local_seq, uint64_t next_seq) {
                access_rx_cache->check_access(filename, /*allow_async=*/true, [matched, engine, serializer, local_seq, next_seq, filename{ strdup(filename) }](bool ok) {
                        stat_if_needed(filename, ok, engine, [matched, serializer, local_seq, next_seq, filename](bool ok) {
                                if (ok) {
                                        ++*matched;
                                        serializer->print(local_seq, next_seq - local_seq, filename);
                                } else {
                                        serializer->print(local_seq, next_seq - local_seq, "");
                                }
                                free(filename);
                        });
                });
        };

        // We need to know the next sequence number before inserting into Serializer,
        // so always buffer one candidate.
        const char *pending_candidate = nullptr;

        uint64_t local_seq = seq << 32;
        for (const char *filename = block.data();
             filename != block.data() + block.size();
             filename += strlen(filename) + 1) {
                const char *haystack = filename;
                if (match_basename) {
                        haystack = strrchr(filename, '/');
                        if (haystack == nullptr) {
                                haystack = filename;
                        } else {
                                ++haystack;
                        }
                }

                bool found = true;
                for (const Needle &needle : needles) {
                        if (!matches(needle, haystack)) {
                                found = false;
                                break;
                        }
                }
                if (found) {
                        if (pending_candidate != nullptr) {
                                test_candidate(pending_candidate, local_seq, local_seq + 1);
                                ++local_seq;
                        }
                        pending_candidate = filename;
                }
        }
        if (pending_candidate == nullptr) {
                serializer->print(seq << 32, 1ULL << 32, "");
        } else {
                test_candidate(pending_candidate, local_seq, (seq + 1) << 32);
        }
}

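// Fetch and scan the filename blocks for the given docids (the candidates
// from the trigram intersection), issuing the block reads through io_uring.
// Returns the number of matches found.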
size_t scan_docids(const vector<Needle> &needles, const vector<uint32_t> &docids, const Corpus &corpus, IOUringEngine *engine)
{
        Serializer docids_in_order;
        AccessRXCache access_rx_cache(engine, corpus.get_hdr().check_visibility);
        atomic<uint64_t> matched{ 0 };
        for (size_t i = 0; i < docids.size(); ++i) {
                uint32_t docid = docids[i];
                corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, engine, &docids_in_order](string_view compressed) {
                        scan_file_block(needles, compressed, engine, &access_rx_cache, i, &docids_in_order, &matched);
                });
        }
        engine->finish();
        return matched;
}

struct WorkerThread {
        thread t;

        // We use a result queue instead of synchronizing Serializer,
        // since a lock on it becomes a huge choke point if there are
        // lots of threads.
        mutex result_mu;
        struct Result {
                uint64_t seq;
                uint64_t skip;
                string msg;
        };
        vector<Result> results;
};

class WorkerThreadReceiver : public ResultReceiver {
public:
        WorkerThreadReceiver(WorkerThread *wt)
                : wt(wt) {}

        void print(uint64_t seq, uint64_t skip, const string msg) override
        {
                lock_guard<mutex> lock(wt->result_mu);
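                // Merge consecutive non-matches into the previous entry's skip,
                // so the result vector doesn't grow by one element per scanned file.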
                if (msg.empty() && !wt->results.empty() && wt->results.back().seq + wt->results.back().skip == seq) {
                        wt->results.back().skip += skip;
                } else {
                        wt->results.emplace_back(WorkerThread::Result{ seq, skip, move(msg) });
                }
        }

private:
        WorkerThread *wt;
};

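// Move the results queued up by one worker thread into the global Serializer.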
void deliver_results(WorkerThread *wt, Serializer *serializer)
{
        vector<WorkerThread::Result> results;
        {
                lock_guard<mutex> lock(wt->result_mu);
                results = move(wt->results);
        }
        for (const WorkerThread::Result &result : results) {
                serializer->print(result.seq, result.skip, move(result.msg));
        }
}

// We do this sequentially, as it's faster than scattering
// a lot of I/O through io_uring and hoping the kernel will
// coalesce it plus readahead for us. Since we assume that
// we will primarily be CPU-bound, we'll be firing up one
// worker thread for each spare core (the last one will
// only be doing I/O). access() is still synchronous.
uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
{
        {
                const Header &hdr = corpus.get_hdr();
                if (hdr.zstd_dictionary_length_bytes > 0) {
                        string dictionary;
                        dictionary.resize(hdr.zstd_dictionary_length_bytes);
                        complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
                        ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
                }
        }

        AccessRXCache access_rx_cache(nullptr, corpus.get_hdr().check_visibility);
        Serializer serializer;
        uint32_t num_blocks = corpus.get_num_filename_blocks();
        unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
        complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
        atomic<uint64_t> matched{ 0 };

        mutex mu;
        condition_variable queue_added, queue_removed;
        deque<tuple<int, int, string>> work_queue;  // Under mu.
        bool done = false;  // Under mu.

        unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
        dprintf("Using %u worker threads for linear scan.\n", num_threads);
        unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
        for (unsigned i = 0; i < num_threads; ++i) {
                threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, engine{ corpus.engine }, &matched, i] {
                        // regcomp() takes a lock on the regex, so each thread will need its own.
                        const vector<Needle> *use_needles = &needles;
                        vector<Needle> recompiled_needles;
                        if (i != 0 && patterns_are_regex) {
                                recompiled_needles = needles;
                                for (Needle &needle : recompiled_needles) {
                                        needle.re = compile_regex(needle.str);
                                }
                                use_needles = &recompiled_needles;
                        }

                        WorkerThreadReceiver receiver(&threads[i]);
                        for (;;) {
                                uint32_t io_docid, last_docid;
                                string compressed;

                                {
                                        unique_lock<mutex> lock(mu);
                                        queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
                                        if (done && work_queue.empty()) {
                                                return;
                                        }
                                        tie(io_docid, last_docid, compressed) = move(work_queue.front());
                                        work_queue.pop_front();
                                        queue_removed.notify_all();
                                }

                                for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
                                        size_t relative_offset = offsets[docid] - offsets[io_docid];
                                        size_t len = offsets[docid + 1] - offsets[docid];
                                        scan_file_block(*use_needles, { &compressed[relative_offset], len }, engine, &access_rx_cache, docid, &receiver, &matched);
                                }
                        }
                });
        }

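        // Producer: read 32 filename blocks at a time with a single pread()
        // and hand them to the worker threads through the bounded work queue.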
        string compressed;
        for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
                uint32_t last_docid = std::min(io_docid + 32, num_blocks);
                size_t io_len = offsets[last_docid] - offsets[io_docid];
                if (compressed.size() < io_len) {
                        compressed.resize(io_len);
                }
                complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);

                {
                        unique_lock<mutex> lock(mu);
                        queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; });  // Allow ~2MB of data queued up.
                        work_queue.emplace_back(io_docid, last_docid, move(compressed));
                        queue_added.notify_one();  // Avoid the thundering herd.
                }

                // Pick up some results along the way, so that the per-thread result
                // queues don't just keep growing. (Seemingly, draining all of the
                // threads here causes slowness with many threads, but taking just
                // one is OK.)
                unsigned i = io_docid / 32;
                deliver_results(&threads[i % num_threads], &serializer);
        }
        {
                lock_guard<mutex> lock(mu);
                done = true;
                queue_added.notify_all();
        }
        for (unsigned i = 0; i < num_threads; ++i) {
                threads[i].t.join();
                deliver_results(&threads[i], &serializer);
        }
        return matched;
}

// Takes the given posting list, unions it into the parts of the trigram disjunction
// already read; if the list is complete, intersects with “cur_candidates”.
//
// Returns true if the search should be aborted (we are done).
bool new_posting_list_read(TrigramDisjunction *td, vector<uint32_t> decoded, vector<uint32_t> *cur_candidates, vector<uint32_t> *tmp)
{
        if (td->docids.empty()) {
                td->docids = move(decoded);
        } else {
                tmp->clear();
                set_union(decoded.begin(), decoded.end(), td->docids.begin(), td->docids.end(), back_inserter(*tmp));
                swap(*tmp, td->docids);
        }
        if (--td->remaining_trigrams_to_read > 0) {
                // Need to wait for more.
                if (ignore_case) {
                        dprintf("  ... %u reads left in OR group %u (%zu docids in list)\n",
                                td->remaining_trigrams_to_read, td->index, td->docids.size());
                }
                return false;
        }
        if (cur_candidates->empty()) {
                if (ignore_case) {
                        dprintf("  ... all reads done for OR group %u (%zu docids)\n",
                                td->index, td->docids.size());
                }
                *cur_candidates = move(td->docids);
        } else {
                tmp->clear();
                set_intersection(cur_candidates->begin(), cur_candidates->end(),
                                 td->docids.begin(), td->docids.end(),
                                 back_inserter(*tmp));
                swap(*cur_candidates, *tmp);
                if (ignore_case) {
                        if (cur_candidates->empty()) {
                                dprintf("  ... all reads done for OR group %u (%zu docids), intersected (none left, search is done)\n",
                                        td->index, td->docids.size());
                                return true;
                        } else {
                                dprintf("  ... all reads done for OR group %u (%zu docids), intersected (%zu left)\n",
                                        td->index, td->docids.size(), cur_candidates->size());
                        }
                }
        }
        return false;
}

uint64_t do_search_file(const vector<Needle> &needles, const std::string &filename)
{
        int fd = open(filename.c_str(), O_RDONLY);
        if (fd == -1) {
                perror(filename.c_str());
                exit(1);
        }

        // Drop privileges.
        if (setgid(getgid()) != 0) {
                perror("setgid");
                exit(EXIT_FAILURE);
        }

        start = steady_clock::now();
        if (access("/", R_OK | X_OK)) {
                // We can't find anything, no need to bother...
                return 0;
        }

        IOUringEngine engine(/*slop_bytes=*/16);  // 16 slop bytes as described in turbopfor.h.
        Corpus corpus(fd, &engine);
        dprintf("Corpus init done after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        vector<TrigramDisjunction> trigram_groups;
        if (patterns_are_regex) {
                // We could parse the regex to find trigrams that have to be there
                // (there are actually known algorithms to deal with disjunctions
                // and such, too), but for now, we just go brute force.
                // Using locate with regexes is pretty niche.
        } else {
                for (const Needle &needle : needles) {
                        parse_trigrams(needle.str, ignore_case, &trigram_groups);
                }
        }

        unique_sort(
                &trigram_groups,
                [](const TrigramDisjunction &a, const TrigramDisjunction &b) { return a.trigram_alternatives < b.trigram_alternatives; },
                [](const TrigramDisjunction &a, const TrigramDisjunction &b) { return a.trigram_alternatives == b.trigram_alternatives; });

        // Give them names for debugging.
        unsigned td_index = 0;
        for (TrigramDisjunction &td : trigram_groups) {
                td.index = td_index++;
        }

        // Collect which trigrams we need to look up in the hash table.
        unordered_map<uint32_t, vector<TrigramDisjunction *>> trigrams_to_lookup;
        for (TrigramDisjunction &td : trigram_groups) {
                for (uint32_t trgm : td.trigram_alternatives) {
                        trigrams_to_lookup[trgm].push_back(&td);
                }
        }
        if (trigrams_to_lookup.empty()) {
                // Too short for trigram matching. Apply brute force.
                // (We could have searched through all trigrams that matched
                // the pattern and done a union of them, but that's a lot of
                // work for fairly unclear gain.)
                uint64_t matched = scan_all_docids(needles, fd, corpus);
                dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
                        1e3 * duration<float>(steady_clock::now() - start).count(), matched);
                return matched;
        }

        // Sneak in fetching the dictionary, if present. It's not necessarily clear
        // exactly where it would be cheapest to get it, but it needs to be present
        // before we can decode any of the posting lists. Most likely, it's
        // in the same filesystem block as the header anyway, so it should be
        // present in the cache.
        {
                const Header &hdr = corpus.get_hdr();
                if (hdr.zstd_dictionary_length_bytes > 0) {
                        engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
                                ddict = ZSTD_createDDict(s.data(), s.size());
                                dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
                        });
                }
        }

        // Look them all up on disk.
        bool should_early_exit = false;
        for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
                corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }, &should_early_exit](const Trigram *trgmptr, size_t len) {
                        if (trgmptr == nullptr) {
                                dprintf("trigram %s isn't found\n", print_trigram(trgm).c_str());
                                for (TrigramDisjunction *td : *trigram_groups) {
                                        --td->remaining_trigrams_to_read;

                                        // If we now know this trigram group doesn't match anything at all,
                                        // we can do early exit; however, if we're in a forked child,
                                        // that would confuse the parent process (since we don't write
                                        // our count to the pipe), so we wait until we're back in the
                                        // regular (non-async) context. This is a fairly rare case anyway,
                                        // and the gains from dropping the remaining trigram reads are limited.
                                        if (td->remaining_trigrams_to_read == 0 && td->read_trigrams.empty()) {
                                                if (in_forked_child) {
                                                        should_early_exit = true;
                                                } else {
                                                        dprintf("zero matches in %s, so we are done\n", print_td(*td).c_str());
                                                        if (only_count) {
                                                                printf("0\n");
                                                        }
                                                        exit(0);
                                                }
                                        }
                                }
                                return;
                        }
                        for (TrigramDisjunction *td : *trigram_groups) {
                                --td->remaining_trigrams_to_read;
                                td->max_num_docids += trgmptr->num_docids;
                                td->read_trigrams.emplace_back(*trgmptr, len);
                        }
                });
        }
        engine.finish();
        dprintf("Hashtable lookups done after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        if (should_early_exit) {
                return 0;
        }

        for (TrigramDisjunction &td : trigram_groups) {
                // Reset for reads.
                td.remaining_trigrams_to_read = td.read_trigrams.size();

                if (ignore_case) {  // If case-sensitive, they'll all be pretty obvious single-entry groups.
                        dprintf("OR group %u (max_num_docids=%u): %s\n", td.index, td.max_num_docids, print_td(td).c_str());
                }
        }

        // TODO: For case-insensitive (ie. more than one alternative in each),
        // prioritize the ones with fewer seeks?
        sort(trigram_groups.begin(), trigram_groups.end(),
             [&](const TrigramDisjunction &a, const TrigramDisjunction &b) {
                     return a.max_num_docids < b.max_num_docids;
             });

        unordered_map<uint32_t, vector<TrigramDisjunction *>> uses_trigram;
        for (TrigramDisjunction &td : trigram_groups) {
                for (uint32_t trgm : td.trigram_alternatives) {
                        uses_trigram[trgm].push_back(&td);
                }
        }

        unordered_set<uint32_t> trigrams_submitted_read;
        vector<uint32_t> cur_candidates, tmp, decoded;
        bool done = false;
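        // Read the posting lists in order of increasing worst-case size. Within
        // each OR group, new_posting_list_read() unions the decoded lists; once
        // a group is complete, it is intersected into cur_candidates. If the
        // remaining groups are much larger than the current candidate set, we
        // stop early and let the final scan weed out any false positives.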
        for (TrigramDisjunction &td : trigram_groups) {
                if (!cur_candidates.empty() && td.max_num_docids > cur_candidates.size() * 100) {
                        dprintf("%s has up to %u entries, ignoring the rest (will "
                                "weed out false positives later)\n",
                                print_td(td).c_str(), td.max_num_docids);
                        break;
                }

                for (auto &[trgmptr, len] : td.read_trigrams) {
                        if (trigrams_submitted_read.count(trgmptr.trgm) != 0) {
                                continue;
                        }
                        trigrams_submitted_read.insert(trgmptr.trgm);
                        // Only stay a certain amount ahead, so that we don't spend I/O
                        // on reading the latter, large posting lists. We are unlikely
                        // to need them anyway, even if they should come in first.
                        if (engine.get_waiting_reads() >= 5) {
                                engine.finish();
                                if (done)
                                        break;
                        }
                        engine.submit_read(fd, len, trgmptr.offset, [trgmptr{ trgmptr }, len{ len }, &done, &cur_candidates, &tmp, &decoded, &uses_trigram](string_view s) {
                                if (done)
                                        return;

                                uint32_t trgm = trgmptr.trgm;
                                const unsigned char *pldata = reinterpret_cast<const unsigned char *>(s.data());
                                size_t num = trgmptr.num_docids;
                                decoded.resize(num);
                                decode_pfor_delta1_128(pldata, num, /*interleaved=*/true, &decoded[0]);

                                assert(uses_trigram.count(trgm) != 0);
                                bool was_empty = cur_candidates.empty();
                                if (ignore_case) {
                                        dprintf("trigram %s (%zu bytes) decoded to %zu entries\n", print_trigram(trgm).c_str(), len, num);
                                }

                                for (TrigramDisjunction *td : uses_trigram[trgm]) {
                                        done |= new_posting_list_read(td, decoded, &cur_candidates, &tmp);
                                        if (done)
                                                break;
                                }
                                if (!ignore_case) {
                                        if (was_empty) {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries\n", print_trigram(trgm).c_str(), len, num);
                                        } else if (cur_candidates.empty()) {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries (none left, search is done)\n", print_trigram(trgm).c_str(), len, num);
                                        } else {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries (%zu left)\n", print_trigram(trgm).c_str(), len, num, cur_candidates.size());
                                        }
                                }
                        });
                }
        }
        engine.finish();
        if (done) {
                return 0;
        }
        dprintf("Intersection done after %.1f ms. Doing final verification and printing:\n",
                1e3 * duration<float>(steady_clock::now() - start).count());

        uint64_t matched = scan_docids(needles, cur_candidates, corpus, &engine);
        dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
                1e3 * duration<float>(steady_clock::now() - start).count(), matched);
        return matched;
}

// Run do_search_file() in a child process.
//
// The reason for this is that we're not robust against malicious input, so we need
// to drop privileges after opening the file. (Otherwise, we could fall prey to an attack
// where a user does locate -d badfile.db:/var/lib/plocate/plocate.db, badfile.db contains
// a buffer overflow that takes over the process, and then uses the elevated privileges
// to print out otherwise inaccessible paths.) We solve this by forking and treating the
// child process as untrusted after it has dropped its privileges (which it does before
// reading any data from the file); it returns a single 64-bit number over a pipe,
// and that's it. The parent keeps its privileges, and can then fork out new children
// without fear of being taken over. (The child keeps stdout for outputting results.)
//
// The count is returned over the pipe, because it's needed both for --limit and --count.
uint64_t do_search_file_in_child(const vector<Needle> &needles, const std::string &filename)
{
        int pipefd[2];
        if (pipe(pipefd) == -1) {
                perror("pipe");
                exit(EXIT_FAILURE);
        }

        pid_t child_pid = fork();
        switch (child_pid) {
        case 0: {
                // Child.
                close(pipefd[0]);
                in_forked_child = true;
                uint64_t matched = do_search_file(needles, filename);
                int ret;
                do {
                        ret = write(pipefd[1], &matched, sizeof(matched));
                } while (ret == -1 && errno == EINTR);
                if (ret != sizeof(matched)) {
                        perror("write");
                        _exit(EXIT_FAILURE);
                }
                fflush(stdout);
                _exit(EXIT_SUCCESS);
        }
        case -1:
                // Error.
                perror("fork");
                exit(EXIT_FAILURE);
        default:
                // Parent.
                close(pipefd[1]);
                break;
        }

        // Wait for the child to finish.
        int wstatus;
        pid_t err;
        do {
                err = waitpid(child_pid, &wstatus, 0);
        } while (err == -1 && errno == EINTR);
        if (err == -1) {
                perror("waitpid");
                exit(EXIT_FAILURE);
        }
        if (WIFEXITED(wstatus)) {
                if (WEXITSTATUS(wstatus) != 0) {
                        // The child has probably already printed out its error, so just propagate the exit status.
                        exit(WEXITSTATUS(wstatus));
                }
                // Success!
        } else if (!WIFEXITED(wstatus)) {
                fprintf(stderr, "FATAL: Child died unexpectedly while processing %s\n", filename.c_str());
                exit(1);
        }

        // Now get the number of matches from the child.
        uint64_t matched;
        int ret;
        do {
                ret = read(pipefd[0], &matched, sizeof(matched));
        } while (ret == -1 && errno == EINTR);
        if (ret == -1) {
                perror("read");
                exit(EXIT_FAILURE);
        } else if (ret != sizeof(matched)) {
                fprintf(stderr, "FATAL: Short read through pipe (got %d bytes)\n", ret);
                exit(EXIT_FAILURE);
        }
        close(pipefd[0]);
        return matched;
}

// Parses a colon-separated list of strings and appends them onto the given vector.
// Backslash escapes whatever comes after it.
void parse_dbpaths(const char *ptr, vector<string> *output)
{
        string str;
        while (*ptr != '\0') {
                if (*ptr == '\\') {
                        if (ptr[1] == '\0') {
                                fprintf(stderr, "ERROR: Escape character at the end of string\n");
                                exit(EXIT_FAILURE);
                        }
                        // Escape.
                        str.push_back(ptr[1]);
                        ptr += 2;
                        continue;
                }
                if (*ptr == ':') {
                        // Separator.
                        output->push_back(move(str));
                        ++ptr;
                        continue;
                }
                str.push_back(*ptr++);
        }
        output->push_back(move(str));
}

void usage()
{
        printf(
                "Usage: plocate [OPTION]... PATTERN...\n"
                "\n"
                "  -b, --basename         search only the file name portion of path names\n"
                "  -c, --count            print number of matches instead of the matches\n"
                "  -d, --database DBPATH  search for files in DBPATH\n"
                "                         (default is " DBFILE ")\n"
                "  -i, --ignore-case      search case-insensitively\n"
                "  -l, --limit LIMIT      stop after LIMIT matches\n"
                "  -0, --null             delimit matches by NUL instead of newline\n"
                "  -r, --regexp           interpret patterns as basic regexps (slow)\n"
                "      --regex            interpret patterns as extended regexps (slow)\n"
                "  -w, --wholename        search the entire path name (default; see -b)\n"
                "      --help             print this help\n"
                "      --version          print version information\n");
}

void version()
{
        printf("%s %s\n", PACKAGE_NAME, PACKAGE_VERSION);
        printf("Copyright 2020 Steinar H. Gunderson\n");
        printf("License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl.html>.\n");
        printf("This is free software: you are free to change and redistribute it.\n");
        printf("There is NO WARRANTY, to the extent permitted by law.\n");
        exit(0);
}

int main(int argc, char **argv)
{
        vector<string> dbpaths;

        constexpr int EXTENDED_REGEX = 1000;
        constexpr int FLUSH_CACHE = 1001;
        static const struct option long_options[] = {
                { "help", no_argument, 0, 'h' },
                { "count", no_argument, 0, 'c' },
                { "basename", no_argument, 0, 'b' },
                { "database", required_argument, 0, 'd' },
                { "existing", no_argument, 0, 'e' },
                { "ignore-case", no_argument, 0, 'i' },
                { "limit", required_argument, 0, 'l' },
                { "null", no_argument, 0, '0' },
                { "version", no_argument, 0, 'V' },
                { "regexp", no_argument, 0, 'r' },
                { "regex", no_argument, 0, EXTENDED_REGEX },
                { "wholename", no_argument, 0, 'w' },
                { "debug", no_argument, 0, 'D' },  // Not documented.
                // Enable to test cold-cache behavior (except for access()). Not documented.
                { "flush-cache", no_argument, 0, FLUSH_CACHE },
                { 0, 0, 0, 0 }
        };

        setlocale(LC_ALL, "");
        for (;;) {
                int option_index = 0;
                int c = getopt_long(argc, argv, "bcd:ehil:n:0rwVD", long_options, &option_index);
                if (c == -1) {
                        break;
                }
                switch (c) {
                case 'b':
                        match_basename = true;
                        break;
                case 'c':
                        only_count = true;
                        break;
                case 'd':
                        parse_dbpaths(optarg, &dbpaths);
                        break;
                case 'e':
                        check_existence = true;
                        break;
                case 'h':
                        usage();
                        exit(0);
                case 'i':
                        ignore_case = true;
                        break;
                case 'l':
                case 'n':
                        limit_matches = limit_left = atoll(optarg);
                        if (limit_matches <= 0) {
                                fprintf(stderr, "Error: limit must be a strictly positive number.\n");
                                exit(1);
                        }
                        break;
                case '0':
                        print_nul = true;
                        break;
                case 'r':
                        patterns_are_regex = true;
                        break;
                case EXTENDED_REGEX:
                        patterns_are_regex = true;
                        use_extended_regex = true;
                        break;
                case 'w':
                        match_basename = false;  // No-op unless -b is given first.
                        break;
                case 'D':
                        use_debug = true;
                        break;
                case FLUSH_CACHE:
                        flush_cache = true;
                        break;
                case 'V':
                        version();
                        break;
                default:
                        exit(1);
                }
        }

        if (use_debug || flush_cache) {
                // Debug information would leak information about which files exist,
                // so drop setgid before we open the file; one would either need to run
                // as root, or use a locally-built file. Doing the same thing for
                // flush_cache is mostly paranoia, in an attempt to prevent random users
                // from making plocate slow for everyone else.
                if (setgid(getgid()) != 0) {
                        perror("setgid");
                        exit(EXIT_FAILURE);
                }
        }

        if (!print_nul) {
                stdout_is_tty = isatty(1);
        }

        vector<Needle> needles;
        for (int i = optind; i < argc; ++i) {
                Needle needle;
                needle.str = argv[i];

                // See if there are any wildcard characters, which indicates we should treat it
                // as an (anchored) glob.
                bool any_wildcard = false;
                for (size_t i = 0; i < needle.str.size(); i += read_unigram(needle.str, i).second) {
                        if (read_unigram(needle.str, i).first == WILDCARD_UNIGRAM) {
                                any_wildcard = true;
                                break;
                        }
                }

                if (patterns_are_regex) {
                        needle.type = Needle::REGEX;
                        needle.re = compile_regex(needle.str);
                } else if (any_wildcard) {
                        needle.type = Needle::GLOB;
                } else if (ignore_case) {
                        // strcasestr() doesn't handle locales correctly (even though LSB
                        // claims it should), but somehow, fnmatch() does, and it's about
                        // the same speed as using a regex.
                        needle.type = Needle::GLOB;
                        needle.str = "*" + needle.str + "*";
                } else {
                        needle.type = Needle::STRSTR;
                        needle.str = unescape_glob_to_plain_string(needle.str);
                }
                needles.push_back(move(needle));
        }
        if (needles.empty()) {
                fprintf(stderr, "plocate: no pattern to search for specified\n");
                exit(0);
        }

        if (dbpaths.empty()) {
                // No -d given, so use our default. Note that this happens
                // even if LOCATE_PATH exists, to match mlocate behavior.
                dbpaths.push_back(DBFILE);
        }

        const char *locate_path = getenv("LOCATE_PATH");
        if (locate_path != nullptr) {
                parse_dbpaths(locate_path, &dbpaths);
        }

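        // Search each database in turn. All but the last are searched in a
        // forked, privilege-dropped child (see do_search_file_in_child());
        // the last one runs directly in this process.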
        uint64_t matched = 0;
        for (size_t i = 0; i < dbpaths.size(); ++i) {
                uint64_t this_matched;
                if (i != dbpaths.size() - 1) {
                        this_matched = do_search_file_in_child(needles, dbpaths[i]);
                } else {
                        this_matched = do_search_file(needles, dbpaths[i]);
                }
                matched += this_matched;
                limit_left -= this_matched;
        }
        if (only_count) {
                printf("%" PRId64 "\n", matched);
        }
}