#include "access_rx_cache.h"
#include "db.h"
#include "dprintf.h"
#include "io_uring_engine.h"
#include "needle.h"
#include "parse_trigrams.h"
#include "serializer.h"
#include "turbopfor.h"
#include "unique_sort.h"

#include <algorithm>
#include <assert.h>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <deque>
#include <fcntl.h>
#include <functional>
#include <getopt.h>
#include <inttypes.h>
#include <iterator>
#include <limits>
#include <locale.h>
#include <memory>
#include <mutex>
#include <regex.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <string_view>
#include <thread>
#include <tuple>
#include <unistd.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <zstd.h>

using namespace std;
using namespace std::chrono;

#define DEFAULT_DBPATH "/var/lib/mlocate/plocate.db"

const char *dbpath = DEFAULT_DBPATH;
bool ignore_case = false;
bool only_count = false;
bool print_nul = false;
bool use_debug = false;
bool flush_cache = false;
bool patterns_are_regex = false;
bool use_extended_regex = false;
bool match_basename = false;
int64_t limit_matches = numeric_limits<int64_t>::max();
int64_t limit_left = numeric_limits<int64_t>::max();

steady_clock::time_point start;
ZSTD_DDict *ddict = nullptr;

class Corpus {
public:
        Corpus(int fd, IOUringEngine *engine);
        ~Corpus();
        void find_trigram(uint32_t trgm, function<void(const Trigram *trgmptr, size_t len)> cb);
        void get_compressed_filename_block(uint32_t docid, function<void(string_view)> cb) const;
        size_t get_num_filename_blocks() const;
        off_t offset_for_block(uint32_t docid) const
        {
                return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
        }
        const Header &get_hdr() const { return hdr; }

public:
        const int fd;
        IOUringEngine *const engine;

        Header hdr;
};

Corpus::Corpus(int fd, IOUringEngine *engine)
        : fd(fd), engine(engine)
{
        if (flush_cache) {
                off_t len = lseek(fd, 0, SEEK_END);
                if (len == -1) {
                        perror("lseek");
                        exit(1);
                }
                posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);
        }

        complete_pread(fd, &hdr, sizeof(hdr), /*offset=*/0);
        if (memcmp(hdr.magic, "\0plocate", 8) != 0) {
                fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
                exit(1);
        }
        if (hdr.version != 0 && hdr.version != 1) {
                fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
                exit(1);
        }
        if (hdr.version == 0) {
                // These will be junk data.
                hdr.zstd_dictionary_offset_bytes = 0;
                hdr.zstd_dictionary_length_bytes = 0;
        }
}

Corpus::~Corpus()
{
        close(fd);
}

void Corpus::find_trigram(uint32_t trgm, function<void(const Trigram *trgmptr, size_t len)> cb)
{
        uint32_t bucket = hash_trigram(trgm, hdr.hashtable_size);
        engine->submit_read(fd, sizeof(Trigram) * (hdr.extra_ht_slots + 2), hdr.hash_table_offset_bytes + sizeof(Trigram) * bucket, [this, trgm, cb{ move(cb) }](string_view s) {
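                // A trigram can live in its home bucket or in any of the following
                // hdr.extra_ht_slots slots, so we read extra_ht_slots + 2 Trigram entries in
                // one go; for whichever slot i matches, entry i + 1 is then also in memory,
                // and the difference between the two offsets gives the posting list's length
                // in bytes.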
                const Trigram *trgmptr = reinterpret_cast<const Trigram *>(s.data());
                for (unsigned i = 0; i < hdr.extra_ht_slots + 1; ++i) {
                        if (trgmptr[i].trgm == trgm) {
                                cb(trgmptr + i, trgmptr[i + 1].offset - trgmptr[i].offset);
                                return;
                        }
                }

                // Not found.
                cb(nullptr, 0);
        });
}

void Corpus::get_compressed_filename_block(uint32_t docid, function<void(string_view)> cb) const
{
        // Read the file offset from this docid and the next one.
        // This is always allowed, since we have a sentinel block at the end.
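        // The filename index at hdr.filename_index_offset_bytes is a flat array of
        // uint64_t file offsets, one per docid plus the final sentinel entry, so ptr[0]
        // and ptr[1] below are where this block and the next one start, and
        // ptr[1] - ptr[0] is this block's compressed length. (scan_all_docids() reads
        // the same array in one go.)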
        engine->submit_read(fd, sizeof(uint64_t) * 2, offset_for_block(docid), [this, cb{ move(cb) }](string_view s) {
                const uint64_t *ptr = reinterpret_cast<const uint64_t *>(s.data());
                off_t offset = ptr[0];
                size_t len = ptr[1] - ptr[0];
                engine->submit_read(fd, len, offset, cb);
        });
}

size_t Corpus::get_num_filename_blocks() const
{
        return hdr.num_docids;
}

void scan_file_block(const vector<Needle> &needles, string_view compressed,
                     AccessRXCache *access_rx_cache, uint64_t seq, ResultReceiver *serializer,
                     atomic<uint64_t> *matched)
{
        unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
        if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
                fprintf(stderr, "ZSTD_getFrameContentSize() failed\n");
                exit(1);
        }

        string block;
        block.resize(uncompressed_len + 1);

        static thread_local ZSTD_DCtx *ctx = ZSTD_createDCtx();  // Reused across calls.
        size_t err;

        if (ddict != nullptr) {
                err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
                                                 compressed.size(), ddict);
        } else {
                err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
                                          compressed.size());
        }
        if (ZSTD_isError(err)) {
                fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
                exit(1);
        }
        block[block.size() - 1] = '\0';

        auto test_candidate = [&](const char *filename, uint64_t local_seq, uint64_t next_seq) {
                access_rx_cache->check_access(filename, /*allow_async=*/true, [matched, serializer, local_seq, next_seq, filename{ strdup(filename) }](bool ok) {
                        if (ok) {
                                ++*matched;
                                serializer->print(local_seq, next_seq - local_seq, filename);
                        } else {
                                serializer->print(local_seq, next_seq - local_seq, "");
                        }
                        free(filename);
                });
        };

        // We need to know the next sequence number before inserting into Serializer,
        // so always buffer one candidate.
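        //
        // Sequence numbers are 64-bit: the upper 32 bits hold the block's own
        // sequence number (seq), the lower 32 bits count candidates within the block.
        // Intermediate candidates are printed with a skip of 1; the last one gets a
        // skip reaching all the way to (seq + 1) << 32, so the receiver can tell that
        // the entire block has been accounted for (an empty block prints a single
        // empty entry covering the full 1ULL << 32 range).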
        const char *pending_candidate = nullptr;

        uint64_t local_seq = seq << 32;
        for (const char *filename = block.data();
             filename != block.data() + block.size();
             filename += strlen(filename) + 1) {
                const char *haystack = filename;
                if (match_basename) {
                        haystack = strrchr(filename, '/');
                        if (haystack == nullptr) {
                                haystack = filename;
                        } else {
                                ++haystack;
                        }
                }

                bool found = true;
                for (const Needle &needle : needles) {
                        if (!matches(needle, haystack)) {
                                found = false;
                                break;
                        }
                }
                if (found) {
                        if (pending_candidate != nullptr) {
                                test_candidate(pending_candidate, local_seq, local_seq + 1);
                                ++local_seq;
                        }
                        pending_candidate = filename;
                }
        }
        if (pending_candidate == nullptr) {
                serializer->print(seq << 32, 1ULL << 32, "");
        } else {
                test_candidate(pending_candidate, local_seq, (seq + 1) << 32);
        }
}

size_t scan_docids(const vector<Needle> &needles, const vector<uint32_t> &docids, const Corpus &corpus, IOUringEngine *engine)
{
        Serializer docids_in_order;
        AccessRXCache access_rx_cache(engine);
        atomic<uint64_t> matched{ 0 };
        for (size_t i = 0; i < docids.size(); ++i) {
                uint32_t docid = docids[i];
                corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, &docids_in_order](string_view compressed) {
                        scan_file_block(needles, compressed, &access_rx_cache, i, &docids_in_order, &matched);
                });
        }
        engine->finish();
        return matched;
}

struct WorkerThread {
        thread t;

        // We use a result queue instead of synchronizing Serializer,
        // since a lock on it becomes a huge choke point if there are
        // lots of threads.
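        //
        // Each worker appends to its own "results" vector under result_mu; the main
        // thread drains it periodically via deliver_results(), which replays the
        // buffered entries into the shared Serializer so output stays in docid order.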
        mutex result_mu;
        struct Result {
                uint64_t seq;
                uint64_t skip;
                string msg;
        };
        vector<Result> results;
};

class WorkerThreadReceiver : public ResultReceiver {
public:
        WorkerThreadReceiver(WorkerThread *wt)
                : wt(wt) {}

        void print(uint64_t seq, uint64_t skip, const string msg) override
        {
                lock_guard<mutex> lock(wt->result_mu);
                if (msg.empty() && !wt->results.empty() && wt->results.back().seq + wt->results.back().skip == seq) {
                        wt->results.back().skip += skip;
                } else {
                        wt->results.emplace_back(WorkerThread::Result{ seq, skip, move(msg) });
                }
        }

private:
        WorkerThread *wt;
};

void deliver_results(WorkerThread *wt, Serializer *serializer)
{
        vector<WorkerThread::Result> results;
        {
                lock_guard<mutex> lock(wt->result_mu);
                results = move(wt->results);
        }
        for (const WorkerThread::Result &result : results) {
                serializer->print(result.seq, result.skip, move(result.msg));
        }
}

// We do this sequentially, as it's faster than scattering
// a lot of I/O through io_uring and hoping the kernel will
// coalesce it plus readahead for us. Since we assume that
// we will primarily be CPU-bound, we'll be firing up one
// worker thread for each spare core (the last one will
// only be doing I/O). access() is still synchronous.
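//
// The setup is a simple bounded producer/consumer pipeline: the main thread
// pread()s 32 blocks' worth of compressed data at a time and pushes it onto
// work_queue (capped at 256 entries, roughly 2 MB), and each worker pops an
// entry and runs scan_file_block() on every block in it, which is also where
// the zstd decompression happens.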
uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
{
        {
                const Header &hdr = corpus.get_hdr();
                if (hdr.zstd_dictionary_length_bytes > 0) {
                        string dictionary;
                        dictionary.resize(hdr.zstd_dictionary_length_bytes);
                        complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
                        ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
                }
        }

        AccessRXCache access_rx_cache(nullptr);
        Serializer serializer;
        uint32_t num_blocks = corpus.get_num_filename_blocks();
        unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
        complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
        atomic<uint64_t> matched{ 0 };

        mutex mu;
        condition_variable queue_added, queue_removed;
        deque<tuple<int, int, string>> work_queue;  // Under mu.
        bool done = false;  // Under mu.

        unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
        dprintf("Using %u worker threads for linear scan.\n", num_threads);
        unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
        for (unsigned i = 0; i < num_threads; ++i) {
                threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
                        // regcomp() takes a lock on the regex, so each thread will need its own.
                        const vector<Needle> *use_needles = &needles;
                        vector<Needle> recompiled_needles;
                        if (i != 0 && patterns_are_regex) {
                                recompiled_needles = needles;
                                for (Needle &needle : recompiled_needles) {
                                        needle.re = compile_regex(needle.str);
                                }
                                use_needles = &recompiled_needles;
                        }

                        WorkerThreadReceiver receiver(&threads[i]);
                        for (;;) {
                                uint32_t io_docid, last_docid;
                                string compressed;

                                {
                                        unique_lock<mutex> lock(mu);
                                        queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
                                        if (done && work_queue.empty()) {
                                                return;
                                        }
                                        tie(io_docid, last_docid, compressed) = move(work_queue.front());
                                        work_queue.pop_front();
                                        queue_removed.notify_all();
                                }

                                for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
                                        size_t relative_offset = offsets[docid] - offsets[io_docid];
                                        size_t len = offsets[docid + 1] - offsets[docid];
                                        scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
                                }
                        }
                });
        }

        string compressed;
        for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
                uint32_t last_docid = std::min(io_docid + 32, num_blocks);
                size_t io_len = offsets[last_docid] - offsets[io_docid];
                if (compressed.size() < io_len) {
                        compressed.resize(io_len);
                }
                complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);

                {
                        unique_lock<mutex> lock(mu);
                        queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; });  // Allow ~2MB of data queued up.
                        work_queue.emplace_back(io_docid, last_docid, move(compressed));
                        queue_added.notify_one();  // Avoid the thundering herd.
                }

                // Pick up some results, so that we are sure that we won't just overload.
                // (Seemingly, going through all of these causes slowness with many threads,
                // but taking only one is OK.)
                unsigned i = io_docid / 32;
                deliver_results(&threads[i % num_threads], &serializer);
        }
        {
                lock_guard<mutex> lock(mu);
                done = true;
                queue_added.notify_all();
        }
        for (unsigned i = 0; i < num_threads; ++i) {
                threads[i].t.join();
                deliver_results(&threads[i], &serializer);
        }
        return matched;
}

// Takes the given posting list, unions it into the parts of the trigram disjunction
// already read; if the list is complete, intersects with “cur_candidates”.
//
// Returns true if the search should be aborted (we are done).
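//
// In other words, the posting lists for the alternatives within one OR group
// (e.g. the different case variants of a trigram) are unioned as they arrive;
// once the last one has been read, the group's combined list is intersected
// into the global candidate set, since every group has to match for a filename
// to remain a candidate.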
bool new_posting_list_read(TrigramDisjunction *td, vector<uint32_t> decoded, vector<uint32_t> *cur_candidates, vector<uint32_t> *tmp)
{
        if (td->docids.empty()) {
                td->docids = move(decoded);
        } else {
                tmp->clear();
                set_union(decoded.begin(), decoded.end(), td->docids.begin(), td->docids.end(), back_inserter(*tmp));
                swap(*tmp, td->docids);
        }
        if (--td->remaining_trigrams_to_read > 0) {
                // Need to wait for more.
                if (ignore_case) {
                        dprintf("  ... %u reads left in OR group %u (%zu docids in list)\n",
                                td->remaining_trigrams_to_read, td->index, td->docids.size());
                }
                return false;
        }
        if (cur_candidates->empty()) {
                if (ignore_case) {
                        dprintf("  ... all reads done for OR group %u (%zu docids)\n",
                                td->index, td->docids.size());
                }
                *cur_candidates = move(td->docids);
        } else {
                tmp->clear();
                set_intersection(cur_candidates->begin(), cur_candidates->end(),
                                 td->docids.begin(), td->docids.end(),
                                 back_inserter(*tmp));
                swap(*cur_candidates, *tmp);
                if (ignore_case) {
                        if (cur_candidates->empty()) {
                                dprintf("  ... all reads done for OR group %u (%zu docids), intersected (none left, search is done)\n",
                                        td->index, td->docids.size());
                                return true;
                        } else {
                                dprintf("  ... all reads done for OR group %u (%zu docids), intersected (%zu left)\n",
                                        td->index, td->docids.size(), cur_candidates->size());
                        }
                }
        }
        return false;
}

void do_search_file(const vector<Needle> &needles, const char *filename)
{
        int fd = open(filename, O_RDONLY);
        if (fd == -1) {
                perror(filename);
                exit(1);
        }

        // Drop privileges.
        if (setgid(getgid()) != 0) {
                perror("setgid");
                exit(EXIT_FAILURE);
        }

        start = steady_clock::now();
        if (access("/", R_OK | X_OK)) {
                // We can't find anything, no need to bother...
                return;
        }

        IOUringEngine engine(/*slop_bytes=*/16);  // 16 slop bytes as described in turbopfor.h.
        Corpus corpus(fd, &engine);
        dprintf("Corpus init done after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        vector<TrigramDisjunction> trigram_groups;
        if (patterns_are_regex) {
                // We could parse the regex to find trigrams that have to be there
                // (there are actually known algorithms to deal with disjunctions
                // and such, too), but for now, we just go brute force.
                // Using locate with regexes is pretty niche.
        } else {
                for (const Needle &needle : needles) {
                        parse_trigrams(needle.str, ignore_case, &trigram_groups);
                }
        }

        unique_sort(
                &trigram_groups,
                [](const TrigramDisjunction &a, const TrigramDisjunction &b) { return a.trigram_alternatives < b.trigram_alternatives; },
                [](const TrigramDisjunction &a, const TrigramDisjunction &b) { return a.trigram_alternatives == b.trigram_alternatives; });

        // Give them names for debugging.
        unsigned td_index = 0;
        for (TrigramDisjunction &td : trigram_groups) {
                td.index = td_index++;
        }

        // Collect which trigrams we need to look up in the hash table.
        unordered_map<uint32_t, vector<TrigramDisjunction *>> trigrams_to_lookup;
        for (TrigramDisjunction &td : trigram_groups) {
                for (uint32_t trgm : td.trigram_alternatives) {
                        trigrams_to_lookup[trgm].push_back(&td);
                }
        }
        if (trigrams_to_lookup.empty()) {
                // Too short for trigram matching. Apply brute force.
                // (We could have searched through all trigrams that matched
                // the pattern and done a union of them, but that's a lot of
                // work for fairly unclear gain.)
                uint64_t matched = scan_all_docids(needles, fd, corpus);
                dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
                        1e3 * duration<float>(steady_clock::now() - start).count(), matched);
                if (only_count) {
                        printf("%" PRId64 "\n", matched);
                }
                return;
        }

        // Sneak in fetching the dictionary, if present. It's not necessarily clear
        // exactly where it would be cheapest to get it, but it needs to be present
        // before we can decode any of the posting lists. Most likely, it's
        // in the same filesystem block as the header anyway, so it should be
        // present in the cache.
        {
                const Header &hdr = corpus.get_hdr();
                if (hdr.zstd_dictionary_length_bytes > 0) {
                        engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
                                ddict = ZSTD_createDDict(s.data(), s.size());
                                dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
                        });
                }
        }

        // Look them all up on disk.
        for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
                corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }](const Trigram *trgmptr, size_t len) {
                        if (trgmptr == nullptr) {
                                dprintf("trigram %s isn't found\n", print_trigram(trgm).c_str());
                                for (TrigramDisjunction *td : *trigram_groups) {
                                        --td->remaining_trigrams_to_read;
                                        if (td->remaining_trigrams_to_read == 0 && td->read_trigrams.empty()) {
                                                dprintf("zero matches in %s, so we are done\n", print_td(*td).c_str());
                                                if (only_count) {
                                                        printf("0\n");
                                                }
                                                exit(0);
                                        }
                                }
                                return;
                        }
                        for (TrigramDisjunction *td : *trigram_groups) {
                                --td->remaining_trigrams_to_read;
                                td->max_num_docids += trgmptr->num_docids;
                                td->read_trigrams.emplace_back(*trgmptr, len);
                        }
                });
        }
        engine.finish();
        dprintf("Hashtable lookups done after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        for (TrigramDisjunction &td : trigram_groups) {
                // Reset for reads.
                td.remaining_trigrams_to_read = td.read_trigrams.size();

                if (ignore_case) {  // If case-sensitive, they'll all be pretty obvious single-entry groups.
                        dprintf("OR group %u (max_num_docids=%u): %s\n", td.index, td.max_num_docids, print_td(td).c_str());
                }
        }

        // TODO: For case-insensitive (ie. more than one alternative in each),
        // prioritize the ones with fewer seeks?
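        //
        // Sorting by max_num_docids ascending means the most selective groups are
        // read first, so the candidate set shrinks as quickly as possible; the loop
        // below can then stop early and skip the remaining, larger posting lists
        // (see the 100x cutoff), leaving any false positives to the final
        // verification pass.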
        sort(trigram_groups.begin(), trigram_groups.end(),
             [&](const TrigramDisjunction &a, const TrigramDisjunction &b) {
                     return a.max_num_docids < b.max_num_docids;
             });

        unordered_map<uint32_t, vector<TrigramDisjunction *>> uses_trigram;
        for (TrigramDisjunction &td : trigram_groups) {
                for (uint32_t trgm : td.trigram_alternatives) {
                        uses_trigram[trgm].push_back(&td);
                }
        }

        unordered_set<uint32_t> trigrams_submitted_read;
        vector<uint32_t> cur_candidates, tmp, decoded;
        bool done = false;
        for (TrigramDisjunction &td : trigram_groups) {
                if (!cur_candidates.empty() && td.max_num_docids > cur_candidates.size() * 100) {
                        dprintf("%s has up to %u entries, ignoring the rest (will "
                                "weed out false positives later)\n",
                                print_td(td).c_str(), td.max_num_docids);
                        break;
                }

                for (auto &[trgmptr, len] : td.read_trigrams) {
                        if (trigrams_submitted_read.count(trgmptr.trgm) != 0) {
                                continue;
                        }
                        trigrams_submitted_read.insert(trgmptr.trgm);
                        // Only stay a certain amount ahead, so that we don't spend I/O
                        // on reading the latter, large posting lists. We are unlikely
                        // to need them anyway, even if they should come in first.
                        if (engine.get_waiting_reads() >= 5) {
                                engine.finish();
                                if (done)
                                        break;
                        }
                        engine.submit_read(fd, len, trgmptr.offset, [trgmptr{ trgmptr }, len{ len }, &done, &cur_candidates, &tmp, &decoded, &uses_trigram](string_view s) {
                                if (done)
                                        return;

                                uint32_t trgm __attribute__((unused)) = trgmptr.trgm;
                                const unsigned char *pldata = reinterpret_cast<const unsigned char *>(s.data());
                                size_t num = trgmptr.num_docids;
                                decoded.resize(num);
                                decode_pfor_delta1_128(pldata, num, /*interleaved=*/true, &decoded[0]);

                                assert(uses_trigram.count(trgm) != 0);
                                bool was_empty = cur_candidates.empty();
                                if (ignore_case) {
                                        dprintf("trigram %s (%zu bytes) decoded to %zu entries\n", print_trigram(trgm).c_str(), len, num);
                                }

                                for (TrigramDisjunction *td : uses_trigram[trgm]) {
                                        done |= new_posting_list_read(td, decoded, &cur_candidates, &tmp);
                                        if (done)
                                                break;
                                }
                                if (!ignore_case) {
                                        if (was_empty) {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries\n", print_trigram(trgm).c_str(), len, num);
                                        } else if (cur_candidates.empty()) {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries (none left, search is done)\n", print_trigram(trgm).c_str(), len, num);
                                        } else {
                                                dprintf("trigram %s (%zu bytes) decoded to %zu entries (%zu left)\n", print_trigram(trgm).c_str(), len, num, cur_candidates.size());
                                        }
                                }
                        });
                }
        }
        engine.finish();
        if (done) {
                return;
        }
        dprintf("Intersection done after %.1f ms. Doing final verification and printing:\n",
                1e3 * duration<float>(steady_clock::now() - start).count());

        uint64_t matched = scan_docids(needles, cur_candidates, corpus, &engine);
        dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
                1e3 * duration<float>(steady_clock::now() - start).count(), matched);

        if (only_count) {
                printf("%" PRId64 "\n", matched);
        }
}

void usage()
{
        printf(
                "Usage: plocate [OPTION]... PATTERN...\n"
                "\n"
                "  -b, --basename         search only the file name portion of path names\n"
                "  -c, --count            print number of matches instead of the matches\n"
                "  -d, --database DBPATH  search for files in DBPATH\n"
                "                         (default is " DEFAULT_DBPATH ")\n"
                "  -i, --ignore-case      search case-insensitively\n"
                "  -l, --limit LIMIT      stop after LIMIT matches\n"
                "  -0, --null             delimit matches by NUL instead of newline\n"
                "  -r, --regexp           interpret patterns as basic regexps (slow)\n"
                "      --regex            interpret patterns as extended regexps (slow)\n"
                "  -w, --wholename        search the entire path name (default; see -b)\n"
                "      --help             print this help\n"
                "      --version          print version information\n");
}

void version()
{
        printf("plocate %s\n", PLOCATE_VERSION);
        printf("Copyright 2020 Steinar H. Gunderson\n");
        printf("License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl.html>.\n");
        printf("This is free software: you are free to change and redistribute it.\n");
        printf("There is NO WARRANTY, to the extent permitted by law.\n");
        exit(0);
}

int main(int argc, char **argv)
{
        constexpr int EXTENDED_REGEX = 1000;
        constexpr int FLUSH_CACHE = 1001;
        static const struct option long_options[] = {
                { "help", no_argument, 0, 'h' },
                { "count", no_argument, 0, 'c' },
                { "basename", no_argument, 0, 'b' },
                { "database", required_argument, 0, 'd' },
                { "ignore-case", no_argument, 0, 'i' },
                { "limit", required_argument, 0, 'l' },
                { "null", no_argument, 0, '0' },
                { "version", no_argument, 0, 'V' },
                { "regexp", no_argument, 0, 'r' },
                { "regex", no_argument, 0, EXTENDED_REGEX },
                { "wholename", no_argument, 0, 'w' },
                { "debug", no_argument, 0, 'D' },  // Not documented.
                // Enable to test cold-cache behavior (except for access()). Not documented.
                { "flush-cache", no_argument, 0, FLUSH_CACHE },
                { 0, 0, 0, 0 }
        };

        setlocale(LC_ALL, "");
        for (;;) {
                int option_index = 0;
                int c = getopt_long(argc, argv, "bcd:hil:n:0wVD", long_options, &option_index);
                if (c == -1) {
                        break;
                }
                switch (c) {
                case 'b':
                        match_basename = true;
                        break;
                case 'c':
                        only_count = true;
                        break;
                case 'd':
                        dbpath = strdup(optarg);
                        break;
                case 'h':
                        usage();
                        exit(0);
                case 'i':
                        ignore_case = true;
                        break;
                case 'l':
                case 'n':
                        limit_matches = limit_left = atoll(optarg);
                        if (limit_matches <= 0) {
                                fprintf(stderr, "Error: limit must be a strictly positive number.\n");
                                exit(1);
                        }
                        break;
                case '0':
                        print_nul = true;
                        break;
                case 'r':
                        patterns_are_regex = true;
                        break;
                case EXTENDED_REGEX:
                        patterns_are_regex = true;
                        use_extended_regex = true;
                        break;
                case 'w':
                        match_basename = false;  // No-op unless -b is given first.
                        break;
                case 'D':
                        use_debug = true;
                        break;
                case FLUSH_CACHE:
                        flush_cache = true;
                        break;
                case 'V':
                        version();
                        break;
                default:
                        exit(1);
                }
        }

        if (use_debug || flush_cache) {
                // Debug information would leak information about which files exist,
                // so drop setgid before we open the file; one would either need to run
                // as root, or use a locally-built file. Doing the same thing for
                // flush_cache is mostly paranoia, in an attempt to prevent random users
                // from making plocate slow for everyone else.
                if (setgid(getgid()) != 0) {
                        perror("setgid");
                        exit(EXIT_FAILURE);
                }
        }

        vector<Needle> needles;
        for (int i = optind; i < argc; ++i) {
                Needle needle;
                needle.str = argv[i];

                // See if there are any wildcard characters, which indicates we should treat it
                // as an (anchored) glob.
                bool any_wildcard = false;
                for (size_t i = 0; i < needle.str.size(); i += read_unigram(needle.str, i).second) {
                        if (read_unigram(needle.str, i).first == WILDCARD_UNIGRAM) {
                                any_wildcard = true;
                                break;
                        }
                }

                if (patterns_are_regex) {
                        needle.type = Needle::REGEX;
                        needle.re = compile_regex(needle.str);
                } else if (any_wildcard) {
                        needle.type = Needle::GLOB;
                } else if (ignore_case) {
                        // strcasestr() doesn't handle locales correctly (even though LSB
                        // claims it should), but somehow, fnmatch() does, and it's about
                        // the same speed as using a regex.
                        needle.type = Needle::GLOB;
                        needle.str = "*" + needle.str + "*";
                } else {
                        needle.type = Needle::STRSTR;
                        needle.str = unescape_glob_to_plain_string(needle.str);
                }
                needles.push_back(move(needle));
        }
        if (needles.empty()) {
                fprintf(stderr, "plocate: no pattern to search for specified\n");
                exit(0);
        }
        do_search_file(needles, dbpath);
}