#include "db.h"
#include "turbopfor-encode.h"

#include <algorithm>
#include <assert.h>
#include <chrono>
#include <getopt.h>
#include <iosfwd>
#include <locale.h>
#include <math.h>
#include <memory>
#include <random>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <string_view>
#include <sys/stat.h>
#include <utility>
#include <vector>
#include <zdict.h>
#include <zstd.h>

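// Upper bound on the number of bytes needed to encode n 32-bit values with
// the TurboPFor-style encoder from turbopfor-encode.h; the on-stack buffers
// below are sized with it so a single block can never overflow them.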
#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

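// One slot for every possible 3-byte combination (256^3); the inverted index
// below is a plain array indexed directly by the trigram value.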
#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

static inline uint32_t read_unigram(const string_view s, size_t idx)
{
	if (idx < s.size()) {
		return (unsigned char)s[idx];
	} else {
		return 0;
	}
}

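// Pack three consecutive bytes into a single 24-bit trigram (little-endian);
// bytes past the end of the string read as zero, so any starting offset gives
// a well-defined value. E.g. read_trigram("abc", 0) == 0x636261.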
static inline uint32_t read_trigram(const string_view s, size_t start)
{
	return read_unigram(s, start) |
	       (read_unigram(s, start + 1) << 8) |
	       (read_unigram(s, start + 2) << 16);
}

enum {
	DBE_NORMAL = 0, /* A non-directory file */
	DBE_DIRECTORY = 1, /* A directory */
	DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
	uint8_t magic[8];
	uint32_t conf_size;
	uint8_t version;
	uint8_t check_visibility;
	uint8_t pad[2];
};

// From mlocate.
struct db_directory {
	uint64_t time_sec;
	uint32_t time_nsec;
	uint8_t pad[4];
};

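// Builds the posting list for one trigram, i.e., the sorted list of docids
// (filename block numbers) it occurs in. The first docid is written verbatim
// as a base value; later docids are stored as deltas minus one (duplicates
// are skipped), compressed 128 at a time as interleaved TurboPFor blocks,
// with any final partial block flushed non-interleaved by finish().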
class PostingListBuilder {
public:
	inline void add_docid(uint32_t docid);
	void finish();

	string encoded;
	size_t num_docids = 0;

private:
	void write_header(uint32_t docid);
	void append_block();

	vector<uint32_t> pending_deltas;

	uint32_t last_block_end, last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
	// Deduplicate against the last inserted value, if any.
	if (docid == last_docid) {
		return;
	}

	if (num_docids == 0) {
		// Very first docid.
		write_header(docid);
		++num_docids;
		last_block_end = last_docid = docid;
		return;
	}

	pending_deltas.push_back(docid - last_docid - 1);
	last_docid = docid;
	if (pending_deltas.size() == 128) {
		append_block();
		pending_deltas.clear();
		last_block_end = docid;
	}
	++num_docids;
}

void PostingListBuilder::finish()
{
	if (pending_deltas.empty()) {
		return;
	}

	assert(!encoded.empty());  // write_header() should already have run.

	// No interleaving for partial blocks.
	unsigned char buf[P4NENC_BOUND(128)];
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
	unsigned char buf[P4NENC_BOUND(128)];
	assert(pending_deltas.size() == 128);
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
	unsigned char buf[P4NENC_BOUND(1)];
	unsigned char *end = write_baseval(docid, buf);
	encoded.append(reinterpret_cast<char *>(buf), end - buf);
}

class DatabaseReceiver {
public:
	virtual ~DatabaseReceiver() = default;
	virtual void add_file(string filename) = 0;
	virtual void flush_block() = 0;
};

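// First pass over the input: keep a uniform random sample of filename blocks
// (reservoir sampling) and hand them to ZDICT_trainFromBuffer(), so the small
// per-block zstd compression in the second pass has a shared dictionary to
// draw on.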
class DictionaryBuilder : public DatabaseReceiver {
public:
	DictionaryBuilder(size_t blocks_to_keep, size_t block_size)
		: blocks_to_keep(blocks_to_keep), block_size(block_size) {}
	void add_file(string filename) override;
	void flush_block() override;
	string train(size_t buf_size);

private:
	const size_t blocks_to_keep, block_size;
	string current_block;
	uint64_t block_num = 0;
	size_t num_files_in_block = 0;

	std::mt19937 reservoir_rand{ 1234 };  // Fixed seed for reproducibility.
	bool keep_current_block = true;
	int64_t slot_for_current_block = -1;

	vector<string> sampled_blocks;
	vector<size_t> lengths;
};

void DictionaryBuilder::add_file(string filename)
{
	if (keep_current_block) {  // Only bother saving the filenames if we're actually keeping the block.
		if (!current_block.empty()) {
			current_block.push_back('\0');
		}
		current_block += filename;
	}
	if (++num_files_in_block == block_size) {
		flush_block();
	}
}

void DictionaryBuilder::flush_block()
{
	if (keep_current_block) {
		if (slot_for_current_block == -1) {
			lengths.push_back(current_block.size());
			sampled_blocks.push_back(move(current_block));
		} else {
			lengths[slot_for_current_block] = current_block.size();
			sampled_blocks[slot_for_current_block] = move(current_block);
		}
	}
	current_block.clear();
	num_files_in_block = 0;
	++block_num;

	if (block_num < blocks_to_keep) {
		keep_current_block = true;
		slot_for_current_block = -1;
	} else {
		// Keep every block with equal probability (reservoir sampling).
		uint64_t idx = uniform_int_distribution<uint64_t>(0, block_num)(reservoir_rand);
		keep_current_block = (idx < blocks_to_keep);
		slot_for_current_block = idx;
	}
}

string DictionaryBuilder::train(size_t buf_size)
{
	string dictionary_buf;
	sort(sampled_blocks.begin(), sampled_blocks.end());  // Seemingly important for decompression speed.
	for (const string &block : sampled_blocks) {
		dictionary_buf += block;
	}

	string buf;
	buf.resize(buf_size);
	size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
	dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);
	buf.resize(ret);

	sampled_blocks.clear();
	lengths.clear();

	return buf;
}

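// Second pass over the input: gathers filenames into blocks of block_size
// entries, compresses each block with the trained dictionary and writes it to
// the output file, and simultaneously builds the in-memory inverted index
// from trigram to posting list of block numbers.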
class Corpus : public DatabaseReceiver {
public:
	Corpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict)
		: invindex(new PostingListBuilder *[NUM_TRIGRAMS]), outfp(outfp), block_size(block_size), cdict(cdict)
	{
		fill(invindex.get(), invindex.get() + NUM_TRIGRAMS, nullptr);
	}
	~Corpus() override
	{
		for (unsigned i = 0; i < NUM_TRIGRAMS; ++i) {
			delete invindex[i];
		}
	}

	void add_file(string filename) override;
	void flush_block() override;

	vector<uint64_t> filename_blocks;
	size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
	bool seen_trigram(uint32_t trgm)
	{
		return invindex[trgm] != nullptr;
	}
	PostingListBuilder &get_pl_builder(uint32_t trgm)
	{
		if (invindex[trgm] == nullptr) {
			invindex[trgm] = new PostingListBuilder;
		}
		return *invindex[trgm];
	}

private:
	unique_ptr<PostingListBuilder *[]> invindex;
	FILE *outfp;
	string current_block;
	string tempbuf;
	const size_t block_size;
	ZSTD_CDict *cdict;
};

void Corpus::add_file(string filename)
{
	++num_files;
	if (!current_block.empty()) {
		current_block.push_back('\0');
	}
	current_block += filename;
	if (++num_files_in_block == block_size) {
		flush_block();
	}
}

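// A finished block becomes one docid: every trigram of every filename in it
// is added to the inverted index, and the NUL-separated filenames are then
// compressed and appended to the output file.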
void Corpus::flush_block()
{
	if (current_block.empty()) {
		return;
	}

	uint32_t docid = num_blocks;

	// Create trigrams.
	const char *ptr = current_block.c_str();
	while (ptr < current_block.c_str() + current_block.size()) {
		string_view s(ptr);
		if (s.size() >= 3) {
			for (size_t j = 0; j < s.size() - 2; ++j) {
				uint32_t trgm = read_trigram(s, j);
				get_pl_builder(trgm).add_docid(docid);
			}
		}
		ptr += s.size() + 1;
	}

	// Compress and add the filename block.
	filename_blocks.push_back(ftell(outfp));
	string compressed = zstd_compress(current_block, cdict, &tempbuf);
	if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
		perror("fwrite()");
		exit(1);
	}

	current_block.clear();
	num_files_in_block = 0;
	++num_blocks;
}

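// Read a NUL-terminated string from the mlocate database, aborting on read
// errors or premature EOF.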
string read_cstr(FILE *fp)
{
	string ret;
	for (;;) {
		int ch = getc(fp);
		if (ch == -1) {
			perror("getc");
			exit(1);
		}
		if (ch == 0) {
			return ret;
		}
		ret.push_back(ch);
	}
}

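// Parse one directory record from the mlocate database: a fixed-size
// db_directory header, the directory path as a C string, then a sequence of
// typed entries (DBE_NORMAL or DBE_DIRECTORY) terminated by DBE_END.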
void handle_directory(FILE *fp, DatabaseReceiver *receiver)
{
	db_directory dummy;
	if (fread(&dummy, sizeof(dummy), 1, fp) != 1) {
		if (feof(fp)) {
			return;
		} else {
			perror("fread");
		}
	}

	string dir_path = read_cstr(fp);
	if (dir_path == "/") {
		dir_path = "";
	}

	for (;;) {
		int type = getc(fp);
		if (type == DBE_NORMAL) {
			string filename = read_cstr(fp);
			receiver->add_file(dir_path + "/" + filename);
		} else if (type == DBE_DIRECTORY) {
			string dirname = read_cstr(fp);
			receiver->add_file(dir_path + "/" + dirname);
		} else {
			return;  // Probably end.
		}
	}
}

void read_mlocate(const char *filename, DatabaseReceiver *receiver)
{
	FILE *fp = fopen(filename, "rb");
	if (fp == nullptr) {
		perror(filename);
		exit(1);
	}

	db_header hdr;
	if (fread(&hdr, sizeof(hdr), 1, fp) != 1) {
		perror("short read");
		exit(1);
	}

	// TODO: Care about the base path.
	string path = read_cstr(fp);
	while (!feof(fp)) {
		handle_directory(fp, receiver);
	}
	fclose(fp);
}

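// Compress one block with zstd, reusing a single compression context and the
// caller-provided scratch buffer across calls; compresses with the trained
// dictionary when one is given, otherwise with plain level-6 compression.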
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
{
	static ZSTD_CCtx *ctx = nullptr;
	if (ctx == nullptr) {
		ctx = ZSTD_createCCtx();
	}

	size_t max_size = ZSTD_compressBound(src.size());
	if (tempbuf->size() < max_size) {
		tempbuf->resize(max_size);
	}
	size_t size;
	if (cdict == nullptr) {
		size = ZSTD_compressCCtx(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
	} else {
		size = ZSTD_compress_usingCDict(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), cdict);
	}
	return string(tempbuf->data(), size);
}

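// Simple trial-division primality test; only used by next_prime() below to
// round the trigram count up to a prime hash table size. (Note that it
// misclassifies 2 and 3, which does not matter for the sizes involved here.)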
bool is_prime(uint32_t x)
{
	if ((x % 2) == 0 || (x % 3) == 0) {
		return false;
	}
	uint32_t limit = ceil(sqrt(x));
	for (uint32_t factor = 5; factor <= limit; ++factor) {
		if ((x % factor) == 0) {
			return false;
		}
	}
	return true;
}

uint32_t next_prime(uint32_t x)
{
	if ((x % 2) == 0) {
		++x;
	}
	while (!is_prime(x)) {
		x += 2;
	}
	return x;
}

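// Build the on-disk hash table: open addressing with linear probing and
// Robin Hood insertion (an entry that has already probed further than the
// incumbent steals its slot), which keeps the longest probe sequence short.
// Returns nullptr if any entry would need more than num_overflow_slots
// probes, so the caller can retry with a larger table.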
unique_ptr<Trigram[]> create_hashtable(Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
	unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		ht[i].trgm = uint32_t(-1);
		ht[i].num_docids = 0;
		ht[i].offset = 0;
	}
	for (uint32_t trgm : all_trigrams) {
		// We don't know offset yet, so set it to zero.
		Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).num_docids), 0 };

		uint32_t bucket = hash_trigram(trgm, ht_size);
		unsigned distance = 0;
		while (ht[bucket].num_docids != 0) {
			// Robin Hood hashing; reduces the longest distance by a lot.
			unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
			if (distance > other_distance) {
				swap(to_insert, ht[bucket]);
				distance = other_distance;
			}

			++bucket, ++distance;
			if (distance > num_overflow_slots) {
				return nullptr;
			}
		}
		ht[bucket] = to_insert;
	}
	return ht;
}

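// Output file layout, written in this order: header (rewritten at the end
// once all offsets are known), zstd dictionary, compressed filename blocks,
// filename index (one offset per block plus a sentinel), hash table, and
// finally the posting lists themselves.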
void do_build(const char *infile, const char *outfile, int block_size)
{
	steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

	umask(0027);
	FILE *outfp = fopen(outfile, "wb");

	// Write the header.
	Header hdr;
	memcpy(hdr.magic, "\0plocate", 8);
	hdr.version = -1;  // Mark as broken.
	hdr.hashtable_size = 0;  // Not known yet.
	hdr.extra_ht_slots = num_overflow_slots;
	hdr.num_docids = 0;
	hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
	hdr.max_version = 1;
	hdr.filename_index_offset_bytes = -1;
	hdr.zstd_dictionary_length_bytes = -1;
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	// Train the dictionary by sampling real blocks.
	// The documentation for ZDICT_trainFromBuffer() claims that a reasonable
	// dictionary size is ~100 kB, but 1 kB seems to actually compress better for us,
	// and decompress just as fast.
	DictionaryBuilder builder(/*blocks_to_keep=*/1000, block_size);
	read_mlocate(infile, &builder);
	string dictionary = builder.train(1024);
	ZSTD_CDict *cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);

	hdr.zstd_dictionary_offset_bytes = ftell(outfp);
	fwrite(dictionary.data(), dictionary.size(), 1, outfp);
	hdr.zstd_dictionary_length_bytes = dictionary.size();

	Corpus corpus(outfp, block_size, cdict);
	read_mlocate(infile, &corpus);
	if (false) {  // To read a plain text file.
		FILE *fp = fopen(infile, "r");
		while (!feof(fp)) {
			char buf[1024];
			if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
				break;
			}
			string s(buf);
			if (s.back() == '\n')
				s.pop_back();
			corpus.add_file(move(s));
		}
		fclose(fp);
	}
	corpus.flush_block();
	dprintf("Read %zu files from %s\n", corpus.num_files, infile);
	hdr.num_docids = corpus.filename_blocks.size();

	// Stick an empty block at the end as sentinel.
	corpus.filename_blocks.push_back(ftell(outfp));
	const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();

	// Write the offsets to the filenames.
	hdr.filename_index_offset_bytes = ftell(outfp);
	const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
	fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
	corpus.filename_blocks.clear();
	corpus.filename_blocks.shrink_to_fit();

	// Finish up encoding the posting lists.
	size_t trigrams = 0, longest_posting_list = 0;
	size_t bytes_for_posting_lists = 0;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (!corpus.seen_trigram(trgm))
			continue;
		PostingListBuilder &pl_builder = corpus.get_pl_builder(trgm);
		pl_builder.finish();
		longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
		trigrams += pl_builder.num_docids;
		bytes_for_posting_lists += pl_builder.encoded.size();
	}
	dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
	        corpus.num_files, corpus.invindex.size(), trigrams, double(trigrams) / corpus.invindex.size(), longest_posting_list);
	dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

	dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

	// Find the used trigrams.
	vector<uint32_t> all_trigrams;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (corpus.seen_trigram(trgm)) {
			all_trigrams.push_back(trgm);
		}
	}

	// Create the hash table.
	unique_ptr<Trigram[]> hashtable;
	uint32_t ht_size = next_prime(all_trigrams.size());
	for (;;) {
		hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
		if (hashtable == nullptr) {
			dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
			ht_size = next_prime(ht_size * 1.05);
		} else {
			dprintf("Created hash table of size %u.\n\n", ht_size);
			break;
		}
	}

	// Find the offsets for each posting list.
	size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
	uint64_t offset = ftell(outfp) + bytes_for_hashtable;
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		hashtable[i].offset = offset;  // Needs to be there even for empty slots.
		if (hashtable[i].num_docids == 0) {
			continue;
		}

		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		offset += encoded.size();
	}

	// Write the hash table.
	hdr.hash_table_offset_bytes = ftell(outfp);
	hdr.hashtable_size = ht_size;
	fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

	// Write the actual posting lists.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		if (hashtable[i].num_docids == 0) {
			continue;
		}
		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		fwrite(encoded.data(), encoded.size(), 1, outfp);
	}

	// Rewind, and write the updated header.
	hdr.version = 1;
	fseek(outfp, 0, SEEK_SET);
	fwrite(&hdr, sizeof(hdr), 1, outfp);
	fclose(outfp);

	size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

	dprintf("Block size:     %7d files\n", block_size);
	dprintf("Dictionary:     %'7.1f MB\n", dictionary.size() / 1048576.0);
	dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
	dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
	dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
	dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
	dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
	dprintf("\n");
}

void usage()
{
	printf(
		"Usage: plocate-build MLOCATE_DB PLOCATE_DB\n"
		"\n"
		"Generate plocate index from mlocate.db, typically /var/lib/mlocate/mlocate.db.\n"
		"Normally, the destination should be /var/lib/mlocate/plocate.db.\n"
		"\n"
		"  -b, --block-size SIZE  number of filenames to store in each block (default 32)\n"
		"      --help             print this help\n"
		"      --version          print version information\n");
}

void version()
{
	printf("plocate-build %s\n", PLOCATE_VERSION);
	printf("Copyright 2020 Steinar H. Gunderson\n");
	printf("License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl.html>.\n");
	printf("This is free software: you are free to change and redistribute it.\n");
	printf("There is NO WARRANTY, to the extent permitted by law.\n");
}

int main(int argc, char **argv)
{
	static const struct option long_options[] = {
		{ "block-size", required_argument, 0, 'b' },
		{ "help", no_argument, 0, 'h' },
		{ "version", no_argument, 0, 'V' },
		{ 0, 0, 0, 0 }
	};

	int block_size = 32;

	setlocale(LC_ALL, "");
	for (;;) {
		int option_index = 0;
		int c = getopt_long(argc, argv, "b:hV", long_options, &option_index);
		if (c == -1) {
			break;
		}
		switch (c) {
		case 'b':
			block_size = atoi(optarg);
			break;
		case 'h':
			usage();
			exit(0);
		case 'V':
			version();
			exit(0);
		default:
			exit(1);
		}
	}

	if (argc - optind != 2) {
		usage();
		exit(1);
	}

	do_build(argv[optind], argv[optind + 1], block_size);
	exit(EXIT_SUCCESS);
}