// Remaining match budget (presumably set from a --limit option — the
// initialization site is not in this chunk; TODO confirm). max() = unlimited.
int64_t limit_left = numeric_limits<int64_t>::max();

// Query start time, used for the dprintf() timing printouts.
steady_clock::time_point start;

// Decompression dictionary for version-1 databases. Set once the header has
// been read (synchronously in scan_all_docids(), or asynchronously via
// io_uring in the normal path); stays nullptr for version-0 databases.
ZSTD_DDict *ddict = nullptr;
// Prints the final timing/count statistics and terminates the process.
// (Presumably called once the match limit has been reached — TODO confirm
// against the caller, which is outside this chunk.)
void apply_limit()
{
	dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
	        1e3 * duration<float>(steady_clock::now() - start).count(), limit_matches);
	if (only_count) {
		// PRId64, not %ld: limit_matches is int64_t, and %ld has the wrong
		// width on 32-bit and LLP64 platforms.
		printf("%" PRId64 "\n", limit_matches);
	}
	exit(0);
}
void AccessRXCache::check_access(const char *filename, bool allow_async, function<void(bool)> cb)
{
- if (!engine->get_supports_stat()) {
+ if (engine == nullptr || !engine->get_supports_stat()) {
allow_async = false;
}
{
return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
}
	// Read-only access to the parsed on-disk header; callers use it to find
	// the zstd dictionary offset/length for version-1 databases.
	const Header &get_hdr() const { return hdr; }
public:
const int fd;
: fd(fd), engine(engine)
{
// Enable to test cold-cache behavior (except for access()).
- if (false) {
+ if (true) {
off_t len = lseek(fd, 0, SEEK_END);
if (len == -1) {
perror("lseek");
fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
exit(1);
}
		// Version 1 added the zstd dictionary support; accept both 0 and 1.
		if (hdr.version != 0 && hdr.version != 1) {
			fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
			exit(1);
		}
		if (hdr.version == 0) {
			// These will be junk data (presumably the fields do not exist in
			// version-0 files, so whatever was read into them is garbage).
			// Zero them so the rest of the code can simply test
			// zstd_dictionary_length_bytes > 0.
			hdr.zstd_dictionary_offset_bytes = 0;
			hdr.zstd_dictionary_length_bytes = 0;
		}
}
Corpus::~Corpus()
}
void scan_file_block(const vector<Needle> &needles, string_view compressed,
- AccessRXCache *access_rx_cache, uint64_t seq, Serializer *serializer, IOUringEngine *engine,
- size_t *matched)
+ AccessRXCache *access_rx_cache, uint64_t seq, Serializer *serializer,
+ uint64_t *matched)
{
unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
string block;
block.resize(uncompressed_len + 1);
	// One decompression context, reused across calls — cheaper than creating
	// a fresh ZSTD_DCtx for every block. NOTE(review): as a function-local
	// static, this assumes the scan is single-threaded — confirm.
	static ZSTD_DCtx *ctx = ZSTD_createDCtx();  // Reused across calls.
	size_t err;

	if (ddict != nullptr) {
		// Version-1 databases: blocks were compressed against a shared
		// dictionary, so we must decompress with that same dictionary.
		err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
		                                 compressed.size(), ddict);
	} else {
		err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
		                          compressed.size());
	}
if (ZSTD_isError(err)) {
fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
exit(1);
uint64_t matched = 0;
for (size_t i = 0; i < docids.size(); ++i) {
uint32_t docid = docids[i];
- corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, engine, &docids_in_order](string_view compressed) {
- scan_file_block(needles, compressed, &access_rx_cache, i, &docids_in_order, engine, &matched);
+ corpus.get_compressed_filename_block(docid, [i, &matched, &needles, &access_rx_cache, &docids_in_order](string_view compressed) {
+ scan_file_block(needles, compressed, &access_rx_cache, i, &docids_in_order, &matched);
});
}
engine->finish();
// We do this sequentially, as it's faster than scattering
// a lot of I/O through io_uring and hoping the kernel will
// coalesce it plus readahead for us.
-uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus, IOUringEngine *engine)
+uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
{
- AccessRXCache access_rx_cache(engine);
	{
		// Version-1 databases may carry a zstd dictionary; this path does not
		// use io_uring, so just load it up front with a blocking pread.
		const Header &hdr = corpus.get_hdr();
		if (hdr.zstd_dictionary_length_bytes > 0) {
			string dictionary;
			dictionary.resize(hdr.zstd_dictionary_length_bytes);
			complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
			// ZSTD_createDDict() copies the buffer, so "dictionary" may go
			// out of scope; the DDict itself lives for the whole process.
			ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
		}
	}

	// nullptr engine: the cache cannot do async access checks on this path.
	AccessRXCache access_rx_cache(nullptr);
	Serializer serializer;  // Mostly dummy; handles only the limit.
uint32_t num_blocks = corpus.get_num_filename_blocks();
unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
	for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
		// offsets[] holds absolute file positions; convert to an offset
		// relative to the batch starting at io_docid (presumably read into
		// "compressed" by the preceding sequential I/O — confirm).
		size_t relative_offset = offsets[docid] - offsets[io_docid];
		size_t len = offsets[docid + 1] - offsets[docid];
		scan_file_block(needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &serializer, &matched);
	}
}
return matched;
// (We could have searched through all trigrams that matched
// the pattern and done a union of them, but that's a lot of
// work for fairly unclear gain.)
- uint64_t matched = scan_all_docids(needles, fd, corpus, &engine);
+ uint64_t matched = scan_all_docids(needles, fd, corpus);
if (only_count) {
printf("%" PRId64 "\n", matched);
}
return;
}
	// Sneak in fetching the dictionary, if present. It's not necessarily clear
	// exactly where it would be cheapest to get it, but it needs to be present
	// before we can decode any of the posting lists. Most likely, it's
	// in the same filesystem block as the header anyway, so it should be
	// present in the cache.
	{
		const Header &hdr = corpus.get_hdr();
		if (hdr.zstd_dictionary_length_bytes > 0) {
			engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
				// ZSTD_createDDict() copies the data, so it is fine for the
				// buffer behind "s" to be freed after this callback returns.
				ddict = ZSTD_createDDict(s.data(), s.size());
				dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
			});
		}
	}
+
// Look them all up on disk.
for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }](const Trigram *trgmptr, size_t len) {