int64_t limit_left = numeric_limits<int64_t>::max();
steady_clock::time_point start;
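+// Shared zstd decompression dictionary, loaded from a version-1 plocate.db; stays null if the database has none.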
+ZSTD_DDict *ddict = nullptr;
void apply_limit()
{
if (--limit_left > 0) {
return;
}
dprintf("Done in %.1f ms, found %" PRId64 " matches.\n",
1e3 * duration<float>(steady_clock::now() - start).count(), limit_matches);
if (only_count) {
- printf("%ld\n", limit_matches);
+ printf("%" PRId64 "\n", limit_matches);
}
exit(0);
}
{
return hdr.filename_index_offset_bytes + docid * sizeof(uint64_t);
}
+ const Header &get_hdr() const { return hdr; }
public:
const int fd;
Corpus::Corpus(int fd, IOUringEngine *engine)
: fd(fd), engine(engine)
{
// Enable to test cold-cache behavior (except for access()).
if (false) {
off_t len = lseek(fd, 0, SEEK_END);
if (len == -1) {
perror("lseek");
exit(1);
}
}
complete_pread(fd, &hdr, sizeof(hdr), /*offset=*/0);
if (memcmp(hdr.magic, "\0plocate", 8) != 0) {
fprintf(stderr, "plocate.db is corrupt or an old version; please rebuild it.\n");
exit(1);
}
- if (hdr.version != 0) {
- fprintf(stderr, "plocate.db has version %u, expected 0; please rebuild it.\n", hdr.version);
+ if (hdr.version != 0 && hdr.version != 1) {
+ fprintf(stderr, "plocate.db has version %u, expected 0 or 1; please rebuild it.\n", hdr.version);
exit(1);
}
+ if (hdr.version == 0) {
+ // A version-0 database has no dictionary, so these fields contain junk; zero them.
+ hdr.zstd_dictionary_offset_bytes = 0;
+ hdr.zstd_dictionary_length_bytes = 0;
+ }
}
Corpus::~Corpus()
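+// Decompress a single filename block and scan it against the needles, updating *matched.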
void scan_file_block(const vector<Needle> &needles, string_view compressed,
AccessRXCache *access_rx_cache, uint64_t seq, Serializer *serializer,
- size_t *matched)
+ uint64_t *matched)
{
unsigned long long uncompressed_len = ZSTD_getFrameContentSize(compressed.data(), compressed.size());
if (uncompressed_len == ZSTD_CONTENTSIZE_UNKNOWN || uncompressed_len == ZSTD_CONTENTSIZE_ERROR) {
fprintf(stderr, "ZSTD_getFrameContentSize() failed\n");
exit(1);
}
string block;
block.resize(uncompressed_len + 1);
- size_t err = ZSTD_decompress(&block[0], block.size(), compressed.data(),
- compressed.size());
+ static ZSTD_DCtx *ctx = ZSTD_createDCtx(); // Reused across calls.
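+ // NOTE: a single static DCtx assumes all decompression stays on one thread.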
+ size_t err;
+
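+ // Use the shared dictionary if the database supplied one; otherwise decompress plainly.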
+ if (ddict != nullptr) {
+ err = ZSTD_decompress_usingDDict(ctx, &block[0], block.size(), compressed.data(),
+ compressed.size(), ddict);
+ } else {
+ err = ZSTD_decompressDCtx(ctx, &block[0], block.size(), compressed.data(),
+ compressed.size());
+ }
if (ZSTD_isError(err)) {
fprintf(stderr, "ZSTD_decompress(): %s\n", ZSTD_getErrorName(err));
exit(1);
// coalesce it plus readahead for us.
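+// Scans every filename block in the corpus; the fallback when the trigram index cannot narrow the search.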
uint64_t scan_all_docids(const vector<Needle> &needles, int fd, const Corpus &corpus)
{
+ {
+ const Header &hdr = corpus.get_hdr();
+ if (hdr.zstd_dictionary_length_bytes > 0) {
+ string dictionary;
+ dictionary.resize(hdr.zstd_dictionary_length_bytes);
+ complete_pread(fd, &dictionary[0], hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes);
+ ddict = ZSTD_createDDict(dictionary.data(), dictionary.size());
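+ // The DDict is kept for the entire scan and never freed.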
+ }
+ }
+
AccessRXCache access_rx_cache(nullptr);
Serializer serializer; // Mostly dummy; handles only the limit.
uint32_t num_blocks = corpus.get_num_filename_blocks();
return;
}
+ // Sneak in fetching the dictionary, if present. It's not necessarily clear
+ // exactly where it would be cheapest to get it, but it needs to be present
+ // before we can decode any of the compressed filename blocks. Most likely, it's
+ // in the same filesystem block as the header anyway, so it should be
+ // present in the cache.
+ {
+ const Header &hdr = corpus.get_hdr();
+ if (hdr.zstd_dictionary_length_bytes > 0) {
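+ // The read completes asynchronously; ddict is created in the callback once the bytes arrive.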
+ engine.submit_read(fd, hdr.zstd_dictionary_length_bytes, hdr.zstd_dictionary_offset_bytes, [](string_view s) {
+ ddict = ZSTD_createDDict(s.data(), s.size());
+ dprintf("Dictionary initialized after %.1f ms.\n", 1e3 * duration<float>(steady_clock::now() - start).count());
+ });
+ }
+ }
+
// Look them all up on disk.
for (auto &[trgm, trigram_groups] : trigrams_to_lookup) {
corpus.find_trigram(trgm, [trgm{ trgm }, trigram_groups{ &trigram_groups }](const Trigram *trgmptr, size_t len) {