X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=plocate.cpp;h=c270e79eff24f99481dca08ae47b4cc7abdf6651;hb=6aff0c02ce9e25495986e401cb3b3a9796ab81b3;hp=eb00adac8cced544b8ef0db9524f85cfcd146722;hpb=15a1b29147914bcea778d99e8bedac7df410fc62;p=plocate

diff --git a/plocate.cpp b/plocate.cpp
index eb00ada..c270e79 100644
--- a/plocate.cpp
+++ b/plocate.cpp
@@ -2,23 +2,27 @@
 #include "dprintf.h"
 #include "io_uring_engine.h"
 #include "parse_trigrams.h"
+#include "serializer.h"
 #include "turbopfor.h"
 #include "unique_sort.h"
 
 #include <algorithm>
 #include <assert.h>
+#include <atomic>
 #include <chrono>
+#include <condition_variable>
+#include <deque>
 #include <fcntl.h>
 #include <functional>
 #include <getopt.h>
 #include <inttypes.h>
 #include <iterator>
 #include <limits>
 #include <map>
 #include <memory>
-#include <queue>
+#include <mutex>
 #include <regex.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -26,6 +30,8 @@
 #include <string_view>
+#include <thread>
+#include <tuple>
 #include <unistd.h>
 #include <unordered_map>
 #include <utility>
 #include <vector>
 #include <zstd.h>
@@ -49,77 +55,9 @@
 int64_t limit_matches = numeric_limits<int64_t>::max();
 int64_t limit_left = numeric_limits<int64_t>::max();
 
 steady_clock::time_point start;
+ZSTD_DDict *ddict = nullptr;
 
-void apply_limit()
-{
-	if (--limit_left > 0) {
-		return;
-	}
-	dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
-	        1e3 * duration<float>(steady_clock::now() - start).count(), limit_matches);
-	if (only_count) {
-		printf("%" PRId64 "\n", limit_matches);
-	}
-	exit(0);
-}
-
-class Serializer {
-public:
-	~Serializer() { assert(limit_left <= 0 || pending.empty()); }
-	void print(uint64_t seq, uint64_t skip, const string msg);
-
-private:
-	uint64_t next_seq = 0;
-	struct Element {
-		uint64_t seq, skip;
-		string msg;
-
-		bool operator<(const Element &other) const
-		{
-			return seq > other.seq;
-		}
-	};
-	priority_queue<Element> pending;
-};
-
-void Serializer::print(uint64_t seq, uint64_t skip, const string msg)
-{
-	if (only_count) {
-		if (!msg.empty()) {
-			apply_limit();
-		}
-		return;
-	}
-
-	if (next_seq != seq) {
-		pending.push(Element{ seq, skip, move(msg) });
-		return;
-	}
-
-	if (!msg.empty()) {
-		if (print_nul) {
-			printf("%s%c", msg.c_str(), 0);
-		} else {
-			printf("%s\n", msg.c_str());
-		}
-		apply_limit();
-	}
-	next_seq += skip;
-
-	// See if any delayed prints can now be dealt with.
-	while (!pending.empty() && pending.top().seq == next_seq) {
-		if (!pending.top().msg.empty()) {
-			if (print_nul) {
-				printf("%s%c", pending.top().msg.c_str(), 0);
-			} else {
-				printf("%s\n", pending.top().msg.c_str());
-			}
-			apply_limit();
-		}
-		next_seq += pending.top().skip;
-		pending.pop();
-	}
-}
+regex_t compile_regex(const string &needle);
 
 struct Needle {
 	enum { STRSTR,
@@ -156,10 +94,12 @@ private:
 	};
 	map<string, vector<PendingStat>> pending_stats;
 	IOUringEngine *engine;
+	mutex mu;
 };
 
 void AccessRXCache::check_access(const char *filename, bool allow_async, function<void(bool)> cb)
 {
+	lock_guard<mutex> lock(mu);
 	if (engine == nullptr || !engine->get_supports_stat()) {
 		allow_async = false;
 	}
@@ -232,6 +172,7 @@ public:
 	{
 		return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
 	}
+	const Header &get_hdr() const { return hdr; }
 
 public:
 	const int fd;
@@ -258,10 +199,15 @@ Corpus::Corpus(int fd, IOUringEngine *engine)
 		fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
 		exit(1);
 	}
-	if (hdr.version != 0) {
-		fprintf(stderr, "plocate.db has version %u, expected 0; please rebuild it.\n", hdr.version);
+	if (hdr.version != 0 && hdr.version != 1) {
+		fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
 		exit(1);
 	}
+	if (hdr.version == 0) {
+		// These will be junk data.
+		hdr.zstd_dictionary_offset_bytes = 0;
+		hdr.zstd_dictionary_length_bytes = 0;
+	}
 }
 
 Corpus::~Corpus()
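Header and its on-disk layout live in db.h, which is outside this diff; the sketch below is a hypothetical reconstruction from the fields the patch actually touches (version, zstd_dictionary_offset_bytes, zstd_dictionary_length_bytes, plus filename_index_offset_bytes used by offset_for_block()), so treat everything else in it as an assumption rather than the authoritative layout.

// Hypothetical reconstruction; db.h is not part of this diff. Only the
// members actually referenced by the patch are certain to exist.
#include <stdint.h>

struct Header {
	char magic[8];     // Assumed: the "corrupt or an old version" check above.
	uint32_t version;  // 0 = no dictionary; 1 = the dictionary fields are valid.
	uint32_t hashtable_size;
	uint32_t extra_ht_slots;
	uint32_t num_docids;
	uint64_t hash_table_offset_bytes;
	uint64_t filename_index_offset_bytes;  // Used by offset_for_block().

	// New in version 1. In a version-0 file these bytes are junk, which is
	// why Corpus::Corpus() zeroes them right after reading the header.
	uint64_t zstd_dictionary_offset_bytes;
	uint64_t zstd_dictionary_length_bytes;
};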
@@ -304,8 +250,8 @@ size_t Corpus::get_num_filename_blocks() const
 }
 
 void scan_file_block(const vector<Needle> &needles, string_view compressed,
-                     AccessRXCache *access_rx_cache, uint64_t seq, Serializer *serializer,
-                     uint64_t *matched)
+                     AccessRXCache *access_rx_cache, uint64_t seq, ResultReceiver *serializer,
+                     atomic<uint64_t> *matched)
 {
 	unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
 	if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
@@ -316,8 +262,16 @@
 	string block;
 	block.resize(uncompressed_len + 1);
 
-	size_t err = ZSTD_decompress(&block[0], block.size(), compressed.data(),
-	                             compressed.size());
+	static thread_local ZSTD_DCtx *ctx = ZSTD_createDCtx();  // Reused across calls.
+	size_t err;
+
+	if (ddict != nullptr) {
+		err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
+		                                 compressed.size(), ddict);
+	} else {
+		err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
+		                          compressed.size());
+	}
 	if (ZSTD_isError(err)) {
 		fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
 		exit(1);
@@ -370,7 +324,7 @@ size_t scan_docids(const vector<Needle> &needles, const vector<uint32_t> &docids
 {
 	Serializer docids_in_order;
 	AccessRXCache access_rx_cache(engine);
-	uint64_t matched = 0;
+	atomic<uint64_t> matched{ 0 };
 	for (size_t i = 0; i < docids.size(); ++i) {
 		uint32_t docid = docids[i];
 		corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, &docids_in_order](string_view compressed) {
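The switch from uint64_t to atomic<uint64_t> for matched (both here and in scan_file_block()'s signature above) is what lets several worker threads bump the same match counter; the increment itself happens in code outside this diff. A minimal standalone illustration, not plocate code, of why a relaxed atomic suffices for a pure counter:

// Standalone demonstration; not part of plocate.
#include <atomic>
#include <cstdint>
#include <thread>
#include <vector>

int main()
{
	std::atomic<std::uint64_t> matched{ 0 };
	std::vector<std::thread> workers;
	for (int i = 0; i < 4; ++i) {
		workers.emplace_back([&matched] {
			for (int j = 0; j < 100000; ++j) {
				// A pure counter needs no ordering guarantees, so relaxed
				// suffices; with a plain uint64_t this loop would be a
				// data race and thus undefined behavior.
				matched.fetch_add(1, std::memory_order_relaxed);
			}
		});
	}
	for (std::thread &t : workers) {
		t.join();
	}
	return matched.load() == 400000 ? 0 : 1;
}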
@@ -381,18 +335,124 @@
 	return matched;
 }
 
+struct WorkerThread {
+	thread t;
+
+	// We use a result queue instead of synchronizing Serializer,
+	// since a lock on it becomes a huge choke point if there are
+	// lots of threads.
+	mutex result_mu;
+	struct Result {
+		uint64_t seq;
+		uint64_t skip;
+		string msg;
+	};
+	vector<Result> results;
+};
+
+class WorkerThreadReceiver : public ResultReceiver {
+public:
+	WorkerThreadReceiver(WorkerThread *wt)
+		: wt(wt) {}
+
+	void print(uint64_t seq, uint64_t skip, const string msg) override
+	{
+		lock_guard<mutex> lock(wt->result_mu);
+		if (msg.empty() && !wt->results.empty() && wt->results.back().seq + wt->results.back().skip == seq) {
+			wt->results.back().skip += skip;
+		} else {
+			wt->results.emplace_back(WorkerThread::Result{ seq, skip, move(msg) });
+		}
+	}
+
+private:
+	WorkerThread *wt;
+};
+
+void deliver_results(WorkerThread *wt, Serializer *serializer)
+{
+	vector<WorkerThread::Result> results;
+	{
+		lock_guard<mutex> lock(wt->result_mu);
+		results = move(wt->results);
+	}
+	for (const WorkerThread::Result &result : results) {
+		serializer->print(result.seq, result.skip, move(result.msg));
+	}
+}
+
 // We do this sequentially, as it's faster than scattering
 // a lot of I/O through io_uring and hoping the kernel will
-// coalesce it plus readahead for us.
+// coalesce it plus readahead for us. Since we assume that
+// we will primarily be CPU-bound, we'll be firing up one
+// worker thread for each spare core (the last one will
+// only be doing I/O). access() is still synchronous.
 uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
 {
+	{
+		const Header &hdr = corpus.get_hdr();
+		if (hdr.zstd_dictionary_length_bytes > 0) {
+			string dictionary;
+			dictionary.resize(hdr.zstd_dictionary_length_bytes);
+			complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
+			ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
+		}
+	}
+
 	AccessRXCache access_rx_cache(nullptr);
-	Serializer serializer;  // Mostly dummy; handles only the limit.
+	Serializer serializer;
 	uint32_t num_blocks = corpus.get_num_filename_blocks();
 	unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
 	complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
+	atomic<uint64_t> matched{ 0 };
+
+	mutex mu;
+	condition_variable queue_added, queue_removed;
+	deque<tuple<uint32_t, uint32_t, string>> work_queue;  // Under mu.
+	bool done = false;  // Under mu.
+
+	unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
+	dprintf("Using %u worker threads for linear scan.\n", num_threads);
+	unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
+	for (unsigned i = 0; i < num_threads; ++i) {
+		threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
+			// regcomp() takes a lock on the regex, so each thread will need its own.
+			const vector<Needle> *use_needles = &needles;
+			vector<Needle> recompiled_needles;
+			if (i != 0 && patterns_are_regex) {
+				recompiled_needles = needles;
+				for (Needle &needle : recompiled_needles) {
+					needle.re = compile_regex(needle.str);
+				}
+				use_needles = &recompiled_needles;
+			}
+
+			WorkerThreadReceiver receiver(&threads[i]);
+			for (;;) {
+				uint32_t io_docid, last_docid;
+				string compressed;
+
+				{
+					unique_lock<mutex> lock(mu);
+					queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
+					if (done && work_queue.empty()) {
+						return;
+					}
+					tie(io_docid, last_docid, compressed) = move(work_queue.front());
+					work_queue.pop_front();
+					queue_removed.notify_all();
+				}
+
+				for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
+					size_t relative_offset = offsets[docid] - offsets[io_docid];
+					size_t len = offsets[docid + 1] - offsets[docid];
+					scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
+				}
+			}
+		});
+	}
+
 	string compressed;
-	uint64_t matched = 0;
 	for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
 		uint32_t last_docid = std::min(io_docid + 32, num_blocks);
 		size_t io_len = offsets[last_docid] - offsets[io_docid];
@@ -401,11 +461,27 @@
 		}
 		complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);
 
-		for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
-			size_t relative_offset = offsets[docid] - offsets[io_docid];
-			size_t len = offsets[docid + 1] - offsets[docid];
-			scan_file_block(needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &serializer, &matched);
+		{
+			unique_lock<mutex> lock(mu);
+			queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; });  // Allow ~2MB of data queued up.
+			work_queue.emplace_back(io_docid, last_docid, move(compressed));
+			queue_added.notify_one();  // Avoid the thundering herd.
 		}
+
+		// Pick up some results, so that we are sure that we won't just overload.
+		// (Seemingly, going through all of these causes slowness with many threads,
+		// but taking only one is OK.)
+		unsigned i = io_docid / 32;
+		deliver_results(&threads[i % num_threads], &serializer);
+	}
+	{
+		lock_guard<mutex> lock(mu);
+		done = true;
+		queue_added.notify_all();
+	}
+	for (unsigned i = 0; i < num_threads; ++i) {
+		threads[i].t.join();
+		deliver_results(&threads[i], &serializer);
+	}
 	return matched;
 }
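Restated outside the diff context, what scan_all_docids() builds is a classic bounded producer/consumer queue: one I/O thread feeding batches, worker threads draining them, a 256-entry cap for backpressure, and separate condition variables for "work added" and "work removed". A self-contained sketch under those assumptions; the names are illustrative, and plocate itself queues (io_docid, last_docid, compressed) tuples rather than bare strings:

// Illustrative sketch; assumes C++17 and a string payload for brevity.
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>
#include <string>

class BoundedQueue {
public:
	void push(std::string item)
	{
		std::unique_lock<std::mutex> lock(mu);
		// Backpressure: the producer sleeps once the queue is full,
		// mirroring the 256-entry (~2 MB) cap in scan_all_docids().
		removed.wait(lock, [this] { return q.size() < kMaxDepth; });
		q.push_back(std::move(item));
		added.notify_one();
	}

	// Returns false only when the queue is drained and finish() was called.
	bool pop(std::string *item)
	{
		std::unique_lock<std::mutex> lock(mu);
		added.wait(lock, [this] { return !q.empty() || done; });
		if (q.empty()) {
			return false;
		}
		*item = std::move(q.front());
		q.pop_front();
		removed.notify_all();
		return true;
	}

	void finish()
	{
		std::lock_guard<std::mutex> lock(mu);
		done = true;
		added.notify_all();
	}

private:
	static constexpr size_t kMaxDepth = 256;
	std::mutex mu;
	std::condition_variable added, removed;
	std::deque<std::string> q;  // Guarded by mu.
	bool done = false;          // Guarded by mu.
};

push() uses notify_one(), matching the "avoid the thundering herd" comment in the patch; notify_all() is only needed at shutdown so that every sleeping worker observes done.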
@@ -523,6 +599,21 @@ void do_search_file(const vector<Needle> &needles, const char *filename)
 		return;
 	}
 
+	// Sneak in fetching the dictionary, if present. It's not necessarily clear
+	// exactly where it would be cheapest to get it, but it needs to be present
+	// before we can decode any of the filename blocks. Most likely, it's
+	// in the same filesystem block as the header anyway, so it should be
+	// present in the cache.
+	{
+		const Header &hdr = corpus.get_hdr();
+		if (hdr.zstd_dictionary_length_bytes > 0) {
+			engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
+				ddict = ZSTD_createDDict(s.data(), s.size());
+				dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
+			});
+		}
+	}
+
 	// Look them all up on disk.
 	for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
 		corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }](const Trigram *trgmptr, size_t len) {
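The reason both scan_all_docids() and do_search_file() must materialize the dictionary before decompressing anything: a zstd frame compressed against a dictionary can only be decoded with the identical dictionary bytes. A minimal round trip using the real zstd API; dict_buf is a stand-in for the trained dictionary the database builder stores in the .db file (how it is trained is out of scope here):

// Sketch only: demonstrates the CDict/DDict pairing this patch relies on.
#include <string>
#include <zstd.h>

std::string roundtrip(const std::string &input, const std::string &dict_buf)
{
	ZSTD_CDict *cdict = ZSTD_createCDict(dict_buf.data(), dict_buf.size(), /*compressionLevel=*/3);
	ZSTD_DDict *ddict = ZSTD_createDDict(dict_buf.data(), dict_buf.size());
	ZSTD_CCtx *cctx = ZSTD_createCCtx();
	ZSTD_DCtx *dctx = ZSTD_createDCtx();

	std::string compressed(ZSTD_compressBound(input.size()), '\0');
	size_t csize = ZSTD_compress_usingCDict(cctx, &compressed[0], compressed.size(),
	                                        input.data(), input.size(), cdict);

	std::string out;
	if (!ZSTD_isError(csize)) {
		// plocate sizes this buffer from ZSTD_getFrameContentSize() instead.
		out.resize(input.size());
		size_t dsize = ZSTD_decompress_usingDDict(dctx, &out[0], out.size(),
		                                          compressed.data(), csize, ddict);
		out.resize(ZSTD_isError(dsize) ? 0 : dsize);
	}

	ZSTD_freeCCtx(cctx);
	ZSTD_freeDCtx(dctx);
	ZSTD_freeCDict(cdict);
	ZSTD_freeDDict(ddict);
	return out;
}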