#include "turbopfor-encode.h"
#include <string_view>

#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

#define NUM_TRIGRAMS 16777216
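// P4NENC_BOUND(n) mirrors the worst-case output size TurboPFor can produce when
// encoding n 32-bit values (payload plus per-block headers), so buffers sized
// with it are always large enough for the encoder. NUM_TRIGRAMS is 2^24 = 256^3,
// one slot for every possible combination of three bytes.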
using namespace std::chrono;

string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

static inline uint32_t read_unigram(const string_view s, size_t idx)
	return (unsigned char)s[idx];

static inline uint32_t read_trigram(const string_view s, size_t start)
	return read_unigram(s, start) |
	       (read_unigram(s, start + 1) << 8) |
	       (read_unigram(s, start + 2) << 16);
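// Example: for s = "abc", read_trigram(s, 0) packs 'a' (0x61), 'b' (0x62) and
// 'c' (0x63) little-endian into 0x636261, so every possible trigram maps to a
// distinct value below NUM_TRIGRAMS.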
	DBE_NORMAL = 0, /* A non-directory file */
	DBE_DIRECTORY = 1, /* A directory */
	DBE_END = 2 /* End of directory contents; contains no name */

	uint8_t check_visibility;

class PostingListBuilder {
	inline void add_docid(uint32_t docid);
	size_t num_docids = 0;

	void write_header(uint32_t docid);
	vector<uint32_t> pending_deltas;
	uint32_t last_block_end, last_docid = -1;
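// Each posting list records which docids (filename block numbers) contain a
// given trigram. The first docid is written as a base value header; every
// later docid is stored as (docid - previous - 1), which works because
// duplicates are skipped and the list is strictly increasing. Deltas are
// buffered and flushed as TurboPFor blocks of 128 values; finish() writes any
// remaining partial block without interleaving.
//
// Example: docids 3, 5, 5, 9 become header base 3 and deltas {1, 3}
// (the duplicate 5 is dropped; 5 - 3 - 1 = 1, 9 - 5 - 1 = 3).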
void PostingListBuilder::add_docid(uint32_t docid)
	// Deduplicate against the last inserted value, if any.
	if (docid == last_docid) {

	if (num_docids == 0) {
		last_block_end = last_docid = docid;

	pending_deltas.push_back(docid - last_docid - 1);
	if (pending_deltas.size() == 128) {
		pending_deltas.clear();
		last_block_end = docid;

void PostingListBuilder::finish()
	if (pending_deltas.empty()) {

	assert(!encoded.empty());  // write_header() should already have run.

	// No interleaving for partial blocks.
	unsigned char buf[P4NENC_BOUND(128)];
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));

void PostingListBuilder::append_block()
	unsigned char buf[P4NENC_BOUND(128)];
	assert(pending_deltas.size() == 128);
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));

void PostingListBuilder::write_header(uint32_t docid)
	unsigned char buf[P4NENC_BOUND(1)];
	unsigned char *end = write_baseval(docid, buf);
	encoded.append(reinterpret_cast<char *>(buf), end - buf);

class DatabaseReceiver {
	virtual ~DatabaseReceiver() = default;
	virtual void add_file(string filename) = 0;
	virtual void flush_block() = 0;
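// The mlocate input is read twice through this interface: first into a
// DictionaryBuilder, which samples blocks to train a zstd dictionary, and then
// into a Corpus, which does the actual compression and indexing (see do_build()).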
class DictionaryBuilder : public DatabaseReceiver {
	DictionaryBuilder(size_t blocks_to_keep, size_t block_size)
		: blocks_to_keep(blocks_to_keep), block_size(block_size) {}
	void add_file(string filename) override;
	void flush_block() override;
	string train(size_t buf_size);

	const size_t blocks_to_keep, block_size;
	string current_block;
	uint64_t block_num = 0;
	size_t num_files_in_block = 0;

	std::mt19937 reservoir_rand{ 1234 };  // Fixed seed for reproducibility.
	bool keep_current_block = true;
	int64_t slot_for_current_block = -1;

	vector<string> sampled_blocks;
	vector<size_t> lengths;

void DictionaryBuilder::add_file(string filename)
	if (keep_current_block) {  // Only bother saving the filenames if we're actually keeping the block.
		if (!current_block.empty()) {
			current_block.push_back('\0');
		current_block += filename;
	if (++num_files_in_block == block_size) {

void DictionaryBuilder::flush_block()
	if (keep_current_block) {
		if (slot_for_current_block == -1) {
			lengths.push_back(current_block.size());
			sampled_blocks.push_back(move(current_block));
		} else {
			lengths[slot_for_current_block] = current_block.size();
			sampled_blocks[slot_for_current_block] = move(current_block);

	current_block.clear();
	num_files_in_block = 0;

	if (block_num < blocks_to_keep) {
		keep_current_block = true;
		slot_for_current_block = -1;
	} else {
		// Keep every block with equal probability (reservoir sampling).
		uint64_t idx = uniform_int_distribution<uint64_t>(0, block_num)(reservoir_rand);
		keep_current_block = (idx < blocks_to_keep);
		slot_for_current_block = idx;
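		// This is standard reservoir sampling (Algorithm R): block number k
		// (0-based) survives the draw with probability blocks_to_keep / (k + 1),
		// since idx is uniform over the k + 1 values 0..block_num and only draws
		// below blocks_to_keep are kept. A kept block overwrites slot idx, so each
		// earlier resident is evicted with equal probability; after n blocks, every
		// block has ended up in the sample with probability blocks_to_keep / n.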
string DictionaryBuilder::train(size_t buf_size)
	string dictionary_buf;
	sort(sampled_blocks.begin(), sampled_blocks.end());  // Seemingly important for decompression speed.
	for (const string &block : sampled_blocks) {
		dictionary_buf += block;

	string buf;
	buf.resize(buf_size);
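	// ZDICT_trainFromBuffer() takes all samples as one concatenated buffer plus
	// an array with the size of each sample; lengths[] was filled in flush_block()
	// for exactly this purpose. It returns the number of dictionary bytes written
	// (or an error code).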
	size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
	dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);

	sampled_blocks.clear();
class Corpus : public DatabaseReceiver {
	Corpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict)
		: invindex(new PostingListBuilder *[NUM_TRIGRAMS]), outfp(outfp), block_size(block_size), cdict(cdict)
		fill(invindex.get(), invindex.get() + NUM_TRIGRAMS, nullptr);
		for (unsigned i = 0; i < NUM_TRIGRAMS; ++i) {

	void add_file(string filename) override;
	void flush_block() override;

	vector<uint64_t> filename_blocks;
	size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;

	bool seen_trigram(uint32_t trgm)
		return invindex[trgm] != nullptr;
	PostingListBuilder &get_pl_builder(uint32_t trgm)
		if (invindex[trgm] == nullptr) {
			invindex[trgm] = new PostingListBuilder;
		return *invindex[trgm];

	unique_ptr<PostingListBuilder *[]> invindex;
	string current_block;
	const size_t block_size;
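// The Corpus writes the compressed filename blocks straight to the output file
// as they fill up (recording each block's byte offset in filename_blocks) and
// builds the in-memory inverted index: one lazily allocated PostingListBuilder
// per trigram, where the docid added is simply the number of the block that
// contains the filename.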
void Corpus::add_file(string filename)
	if (!current_block.empty()) {
		current_block.push_back('\0');
	current_block += filename;
	if (++num_files_in_block == block_size) {

void Corpus::flush_block()
	if (current_block.empty()) {

	uint32_t docid = num_blocks;

	const char *ptr = current_block.c_str();
	while (ptr < current_block.c_str() + current_block.size()) {
		string_view s(ptr);
		if (s.size() >= 3) {
			for (size_t j = 0; j < s.size() - 2; ++j) {
				uint32_t trgm = read_trigram(s, j);
				get_pl_builder(trgm).add_docid(docid);

	// Compress and add the filename block.
	filename_blocks.push_back(ftell(outfp));
	string compressed = zstd_compress(current_block, cdict, &tempbuf);
	if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {

	current_block.clear();
	num_files_in_block = 0;

string read_cstr(FILE *fp)

void handle_directory(FILE *fp, DatabaseReceiver *receiver)
	if (fread(&dummy, sizeof(dummy), 1, fp) != 1) {

	string dir_path = read_cstr(fp);
	if (dir_path == "/") {

		if (type == DBE_NORMAL) {
			string filename = read_cstr(fp);
			receiver->add_file(dir_path + "/" + filename);
		} else if (type == DBE_DIRECTORY) {
			string dirname = read_cstr(fp);
			receiver->add_file(dir_path + "/" + dirname);
		} else {
			return;  // Probably end.
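// An mlocate database is a fixed header followed by one record per directory:
// a per-directory header, the NUL-terminated directory path, and then a list of
// entries, each a one-byte type (DBE_NORMAL, DBE_DIRECTORY or DBE_END) followed
// by a NUL-terminated name; DBE_END closes the directory.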
void read_mlocate(const char *filename, DatabaseReceiver *receiver)
	FILE *fp = fopen(filename, "rb");

	if (fread(&hdr, sizeof(hdr), 1, fp) != 1) {
		perror("short read");

	// TODO: Care about the base path.
	string path = read_cstr(fp);

		handle_directory(fp, receiver);
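// Compresses one filename block, reusing a single ZSTD_CCtx and a caller-owned
// scratch buffer to avoid repeated allocations. The reader side (plocate) would
// decompress with the matching dictionary; roughly, as a sketch of the standard
// zstd API rather than code from this file:
//
//   ZSTD_DDict *ddict = ZSTD_createDDict(dict.data(), dict.size());
//   ZSTD_decompress_usingDDict(dctx, out, out_capacity, in, in_size, ddict);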
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
	static ZSTD_CCtx *ctx = nullptr;
	if (ctx == nullptr) {
		ctx = ZSTD_createCCtx();

	size_t max_size = ZSTD_compressBound(src.size());
	if (tempbuf->size() < max_size) {
		tempbuf->resize(max_size);

	size_t size;
	if (cdict == nullptr) {
		size = ZSTD_compressCCtx(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
	} else {
		size = ZSTD_compress_usingCDict(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), cdict);

	return string(tempbuf->data(), size);
bool is_prime(uint32_t x)
	if ((x % 2) == 0 || (x % 3) == 0) {

	uint32_t limit = ceil(sqrt(x));
	for (uint32_t factor = 5; factor <= limit; ++factor) {
		if ((x % factor) == 0) {

uint32_t next_prime(uint32_t x)
	while (!is_prime(x)) {
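// The trigram hash table uses open addressing with Robin Hood insertion: when a
// probe hits an occupied slot, whichever entry is currently further from its home
// bucket keeps the slot and the other continues probing, which keeps the longest
// probe sequence short. The table size is kept prime so the buckets produced by
// hash_trigram() spread evenly, and at most num_overflow_slots extra probes are
// allowed; if an insertion would need more, create_hashtable() gives up and
// do_build() retries with a roughly 5% larger prime size.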
unique_ptr<Trigram[]> create_hashtable(Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
	unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		ht[i].trgm = uint32_t(-1);
		ht[i].num_docids = 0;

	for (uint32_t trgm : all_trigrams) {
		// We don't know offset yet, so set it to zero.
		Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).num_docids), 0 };

		uint32_t bucket = hash_trigram(trgm, ht_size);
		unsigned distance = 0;
		while (ht[bucket].num_docids != 0) {
			// Robin Hood hashing; reduces the longest distance by a lot.
			unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
			if (distance > other_distance) {
				swap(to_insert, ht[bucket]);
				distance = other_distance;

			++bucket, ++distance;
			if (distance > num_overflow_slots) {

		ht[bucket] = to_insert;
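// Database layout, in the order do_build() writes it to the output file:
//
//   1. Header (rewritten at the very end, once all offsets are known).
//   2. zstd dictionary.
//   3. Compressed filename blocks (written by Corpus::flush_block()).
//   4. Filename index: one uint64_t file offset per block, plus a sentinel.
//   5. Trigram hash table (each slot carries the offset of its posting list).
//   6. Posting lists, in hash table order.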
void do_build(const char *infile, const char *outfile, int block_size)
	steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

	FILE *outfp = fopen(outfile, "wb");

	memcpy(hdr.magic, "\0plocate", 8);
	hdr.version = -1;  // Mark as broken.
	hdr.hashtable_size = 0;  // Not known yet.
	hdr.extra_ht_slots = num_overflow_slots;
	hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
	hdr.filename_index_offset_bytes = -1;
	hdr.zstd_dictionary_length_bytes = -1;
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	// Train the dictionary by sampling real blocks.
	// The documentation for ZDICT_trainFromBuffer() claims that a reasonable
	// dictionary size is ~100 kB, but 1 kB seems to actually compress better for us,
	// and decompress just as fast.
	DictionaryBuilder builder(/*blocks_to_keep=*/1000, block_size);
	read_mlocate(infile, &builder);
	string dictionary = builder.train(1024);
	ZSTD_CDict *cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);

	hdr.zstd_dictionary_offset_bytes = ftell(outfp);
	fwrite(dictionary.data(), dictionary.size(), 1, outfp);
	hdr.zstd_dictionary_length_bytes = dictionary.size();

	Corpus corpus(outfp, block_size, cdict);
	read_mlocate(infile, &corpus);
	if (false) {  // To read a plain text file.
		FILE *fp = fopen(infile, "r");
			if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {

			if (s.back() == '\n')
				s.pop_back();
			corpus.add_file(move(s));

	corpus.flush_block();
	dprintf("Read %zu files from %s\n", corpus.num_files, infile);
	hdr.num_docids = corpus.filename_blocks.size();

	// Stick an empty block at the end as a sentinel.
	corpus.filename_blocks.push_back(ftell(outfp));
	const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();

	// Write the offsets to the filenames.
	hdr.filename_index_offset_bytes = ftell(outfp);
	const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
	fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
	corpus.filename_blocks.clear();
	corpus.filename_blocks.shrink_to_fit();

	// Finish up encoding the posting lists.
	size_t num_seen_trigrams = 0, trigrams = 0, longest_posting_list = 0;
	size_t bytes_for_posting_lists = 0;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (!corpus.seen_trigram(trgm))
			continue;
		++num_seen_trigrams;  // Count distinct trigrams for the stats below.
		PostingListBuilder &pl_builder = corpus.get_pl_builder(trgm);
		longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
		trigrams += pl_builder.num_docids;
		bytes_for_posting_lists += pl_builder.encoded.size();

	dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
	        corpus.num_files, num_seen_trigrams, trigrams, double(trigrams) / num_seen_trigrams, longest_posting_list);
	dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

	dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());
	// Find the used trigrams.
	vector<uint32_t> all_trigrams;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (corpus.seen_trigram(trgm)) {
			all_trigrams.push_back(trgm);

	// Create the hash table.
	unique_ptr<Trigram[]> hashtable;
	uint32_t ht_size = next_prime(all_trigrams.size());
		hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
		if (hashtable == nullptr) {
			dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
			ht_size = next_prime(ht_size * 1.05);
		} else {
			dprintf("Created hash table of size %u.\n\n", ht_size);
	// Find the offsets for each posting list.
	size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
	uint64_t offset = ftell(outfp) + bytes_for_hashtable;
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		hashtable[i].offset = offset;  // Needs to be there even for empty slots.
		if (hashtable[i].num_docids == 0) {

		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		offset += encoded.size();

	// Write the hash table.
	hdr.hash_table_offset_bytes = ftell(outfp);
	hdr.hashtable_size = ht_size;
	fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

	// Write the actual posting lists.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		if (hashtable[i].num_docids == 0) {

		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		fwrite(encoded.data(), encoded.size(), 1, outfp);

	// Rewind, and write the updated header.
	fseek(outfp, 0, SEEK_SET);
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

	dprintf("Block size:     %7d files\n", block_size);
	dprintf("Dictionary:     %'7.1f MB\n", dictionary.size() / 1048576.0);
	dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
	dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
	dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
	dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
	dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
617 "Usage: plocate-build MLOCATE_DB PLOCATE_DB\n"
619 "Generate plocate index from mlocate.db, typically /var/lib/mlocate/mlocate.db.\n"
620 "Normally, the destination should be /var/lib/mlocate/plocate.db.\n"
622 " -b, --block-size SIZE number of filenames to store in each block (default 32)\n"
623 " --help print this help\n"
624 " --version print version information\n");
629 printf("plocate-build %s\n", PLOCATE_VERSION);
630 printf("Copyright 2020 Steinar H. Gunderson\n");
631 printf("License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl.html>.\n");
632 printf("This is free software: you are free to change and redistribute it.\n");
633 printf("There is NO WARRANTY, to the extent permitted by law.\n");
int main(int argc, char **argv)
	static const struct option long_options[] = {
		{ "block-size", required_argument, 0, 'b' },
		{ "help", no_argument, 0, 'h' },
		{ "version", no_argument, 0, 'V' },

	setlocale(LC_ALL, "");

		int option_index = 0;
		int c = getopt_long(argc, argv, "b:hV", long_options, &option_index);

			block_size = atoi(optarg);

	if (argc - optind != 2) {

	do_build(argv[optind], argv[optind + 1], block_size);