#include "db.h"

#include <algorithm>
#include <arpa/inet.h>
#include <assert.h>
#include <chrono>
#include <endian.h>
#include <fcntl.h>
#include <math.h>
#include <memory>
#include <stdio.h>
#include <string.h>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_map>
#include <vector>
#include <zstd.h>

#include "turbopfor-encode.h"

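// Worst-case output size, in bytes, when TurboPFor-encoding n 32-bit values;
// used to size the on-stack buffers that hold encoded posting list blocks.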
#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

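// Reads one byte from s, or 0 if idx is past the end; read_trigram() packs
// three consecutive bytes little-endian into the low 24 bits of a uint32_t.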
static inline uint32_t read_unigram(const string_view s, size_t idx)
{
	if (idx < s.size()) {
		return (unsigned char)s[idx];
	} else {
		return 0;
	}
}

static inline uint32_t read_trigram(const string_view s, size_t start)
{
	return read_unigram(s, start) |
		(read_unigram(s, start + 1) << 8) |
		(read_unigram(s, start + 2) << 16);
}

enum {
	DBE_NORMAL = 0, /* A non-directory file */
	DBE_DIRECTORY = 1, /* A directory */
	DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
	uint8_t magic[8];
	uint32_t conf_size;
	uint8_t version;
	uint8_t check_visibility;
	uint8_t pad[2];
};

// From mlocate.
struct db_directory {
	uint64_t time_sec;
	uint32_t time_nsec;
	uint8_t pad[4];
};

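// Builds the TurboPFor-encoded posting list for a single trigram. The first
// docid is written verbatim as a base value; every following docid is stored
// as a delta (minus one) from its predecessor. Deltas are buffered and
// flushed as interleaved 128-entry TurboPFor blocks; finish() encodes any
// remaining partial block without interleaving.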
class PostingListBuilder {
public:
	inline void add_docid(uint32_t docid);
	void finish();

	string encoded;
	size_t num_docids = 0;

private:
	void write_header(uint32_t docid);
	void append_block();

	vector<uint32_t> pending_deltas;

	uint32_t last_block_end, last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
	// Deduplicate against the last inserted value, if any.
	if (docid == last_docid) {
		return;
	}

	if (num_docids == 0) {
		// Very first docid.
		write_header(docid);
		++num_docids;
		last_block_end = last_docid = docid;
		return;
	}

	pending_deltas.push_back(docid - last_docid - 1);
	last_docid = docid;
	if (pending_deltas.size() == 128) {
		append_block();
		pending_deltas.clear();
		last_block_end = docid;
	}
	++num_docids;
}

void PostingListBuilder::finish()
{
	if (pending_deltas.empty()) {
		return;
	}

	assert(!encoded.empty());  // write_header() should already have run.

	// No interleaving for partial blocks.
	unsigned char buf[P4NENC_BOUND(128)];
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
	unsigned char buf[P4NENC_BOUND(128)];
	assert(pending_deltas.size() == 128);
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
	encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
	unsigned char buf[P4NENC_BOUND(1)];
	unsigned char *end = write_baseval(docid, buf);
	encoded.append(reinterpret_cast<char *>(buf), end - buf);
}

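// Accumulates filenames into blocks of block_size entries. Each full block is
// zstd-compressed and written to the output file, and the trigrams of every
// filename in it are added to the inverted index under the block's docid.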
class Corpus {
public:
	Corpus(FILE *outfp, size_t block_size)
		// The {} value-initializes the array, so every slot starts out as nullptr.
		: invindex(new PostingListBuilder *[NUM_TRIGRAMS]{}), outfp(outfp), block_size(block_size) {}
	void add_file(string filename);
	void flush_block();

	vector<uint64_t> filename_blocks;
	size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
	bool seen_trigram(uint32_t trgm)
	{
		return invindex[trgm] != nullptr;
	}
	PostingListBuilder &get_pl_builder(uint32_t trgm)
	{
		if (invindex[trgm] == nullptr) {
			invindex[trgm] = new PostingListBuilder;
		}
		return *invindex[trgm];
	}

private:
	unique_ptr<PostingListBuilder *[]> invindex;
	FILE *outfp;
	string current_block;
	string tempbuf;
	const size_t block_size;
};

void Corpus::add_file(string filename)
{
	++num_files;
	if (!current_block.empty()) {
		current_block.push_back('\0');
	}
	current_block += filename;
	if (++num_files_in_block == block_size) {
		flush_block();
	}
}

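// Emits the current block: adds every trigram of every filename in it (the
// filenames are '\0'-separated) to the inverted index, then compresses the
// block with zstd and appends it to the output file.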
void Corpus::flush_block()
{
	if (current_block.empty()) {
		return;
	}

	uint32_t docid = num_blocks;

	// Create trigrams.
	const char *ptr = current_block.c_str();
	while (ptr < current_block.c_str() + current_block.size()) {
		string_view s(ptr);
		if (s.size() >= 3) {
			for (size_t j = 0; j < s.size() - 2; ++j) {
				uint32_t trgm = read_trigram(s, j);
				get_pl_builder(trgm).add_docid(docid);
			}
		}
		ptr += s.size() + 1;
	}

	// Compress and add the filename block.
	filename_blocks.push_back(ftell(outfp));
	string compressed = zstd_compress(current_block, &tempbuf);
	if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
		perror("fwrite()");
		exit(1);
	}

	current_block.clear();
	num_files_in_block = 0;
	++num_blocks;
}

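// Reads a NUL-terminated string from fp; exits on read error or EOF.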
string read_cstr(FILE *fp)
{
	string ret;
	for (;;) {
		int ch = getc(fp);
		if (ch == -1) {
			perror("getc");
			exit(1);
		}
		if (ch == 0) {
			return ret;
		}
		ret.push_back(ch);
	}
}

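// Reads a single directory record from the mlocate database: the timestamp
// header, the directory's path, and then its entries (files and
// subdirectories) until a DBE_END marker or EOF. Every entry is added to the
// corpus as a full path.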
void handle_directory(FILE *fp, Corpus *corpus)
{
	db_directory dummy;
	if (fread(&dummy, sizeof(dummy), 1, fp) != 1) {
		if (feof(fp)) {
			return;
		} else {
			perror("fread");
			exit(1);
		}
	}

	string dir_path = read_cstr(fp);
	if (dir_path == "/") {
		dir_path = "";
	}

	for (;;) {
		int type = getc(fp);
		if (type == DBE_NORMAL) {
			string filename = read_cstr(fp);
			corpus->add_file(dir_path + "/" + filename);
		} else if (type == DBE_DIRECTORY) {
			string dirname = read_cstr(fp);
			corpus->add_file(dir_path + "/" + dirname);
		} else {
			return;  // Probably DBE_END (or EOF).
		}
	}
}

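// Parses an mlocate database: reads the fixed header and the base path
// string, then one directory record after another until EOF.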
void read_mlocate(const char *filename, Corpus *corpus)
{
	FILE *fp = fopen(filename, "rb");
	if (fp == nullptr) {
		perror(filename);
		exit(1);
	}

	db_header hdr;
	if (fread(&hdr, sizeof(hdr), 1, fp) != 1) {
		perror("short read");
		exit(1);
	}

	// TODO: Care about the base path.
	string path = read_cstr(fp);
	while (!feof(fp)) {
		handle_directory(fp, corpus);
	}
	fclose(fp);
}

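// Compresses src with zstd. tempbuf is reused between calls so the scratch
// buffer does not have to be reallocated for every block.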
string zstd_compress(const string &src, string *tempbuf)
{
	size_t max_size = ZSTD_compressBound(src.size());
	if (tempbuf->size() < max_size) {
		tempbuf->resize(max_size);
	}
	size_t size = ZSTD_compress(&(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
	return string(tempbuf->data(), size);
}

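// Simple trial division; only used to pick a prime hash table size, so speed
// does not matter. Note that 2 and 3 themselves would be rejected, which does
// not matter for realistic table sizes.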
bool is_prime(uint32_t x)
{
	if ((x % 2) == 0 || (x % 3) == 0) {
		return false;
	}
	uint32_t limit = ceil(sqrt(x));
	for (uint32_t factor = 5; factor <= limit; ++factor) {
		if ((x % factor) == 0) {
			return false;
		}
	}
	return true;
}

uint32_t next_prime(uint32_t x)
{
	if ((x % 2) == 0) {
		++x;
	}
	while (!is_prime(x)) {
		x += 2;
	}
	return x;
}

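// Places all trigrams into an open-addressed hash table using Robin Hood
// hashing (linear probing where the entry that has traveled furthest from its
// home bucket gets to keep the slot). Returns nullptr if any entry would need
// more than num_overflow_slots probes, so the caller can grow the table and
// retry.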
unique_ptr<Trigram[]> create_hashtable(Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
	unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		ht[i].trgm = uint32_t(-1);
		ht[i].num_docids = 0;
		ht[i].offset = 0;
	}
	for (uint32_t trgm : all_trigrams) {
		// We don't know offset yet, so set it to zero.
		Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).num_docids), 0 };

		uint32_t bucket = hash_trigram(trgm, ht_size);
		unsigned distance = 0;
		while (ht[bucket].num_docids != 0) {
			// Robin Hood hashing; reduces the longest distance by a lot.
			unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
			if (distance > other_distance) {
				swap(to_insert, ht[bucket]);
				distance = other_distance;
			}

			++bucket, ++distance;
			if (distance > num_overflow_slots) {
				return nullptr;
			}
		}
		ht[bucket] = to_insert;
	}
	return ht;
}

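// Builds the plocate database. Output layout, in file order: header
// (rewritten with the final values at the end), zstd-compressed filename
// blocks, filename index (byte offsets of the blocks), trigram hash table,
// and finally the posting lists themselves.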
void do_build(const char *infile, const char *outfile, int block_size)
{
	steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

	umask(0027);
	FILE *outfp = fopen(outfile, "wb");
	if (outfp == nullptr) {
		perror(outfile);
		exit(1);
	}

	// Write the header.
	Header hdr;
	memcpy(hdr.magic, "\0plocate", 8);
	hdr.version = -1;  // Mark as broken.
	hdr.hashtable_size = 0;  // Not known yet.
	hdr.extra_ht_slots = num_overflow_slots;
	hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
	hdr.filename_index_offset_bytes = -1;
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	Corpus corpus(outfp, block_size);

	read_mlocate(infile, &corpus);
	if (false) {  // To read a plain text file.
		FILE *fp = fopen(infile, "r");
		while (!feof(fp)) {
			char buf[1024];
			if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
				break;
			}
			string s(buf);
			if (s.back() == '\n')
				s.pop_back();
			corpus.add_file(move(s));
		}
		fclose(fp);
	}
	corpus.flush_block();
	dprintf("Read %zu files from %s\n", corpus.num_files, infile);
	hdr.num_docids = corpus.filename_blocks.size();

	// Stick an empty block at the end as sentinel.
	corpus.filename_blocks.push_back(ftell(outfp));
	const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();

	// Write the offsets to the filenames.
	hdr.filename_index_offset_bytes = ftell(outfp);
	const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
	fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
	corpus.filename_blocks.clear();
	corpus.filename_blocks.shrink_to_fit();

	// Finish up encoding the posting lists.
	size_t num_trigrams = 0, trigrams = 0, longest_posting_list = 0;
	size_t bytes_for_posting_lists = 0;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (!corpus.seen_trigram(trgm))
			continue;
		++num_trigrams;
		PostingListBuilder &pl_builder = corpus.get_pl_builder(trgm);
		pl_builder.finish();
		longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
		trigrams += pl_builder.num_docids;
		bytes_for_posting_lists += pl_builder.encoded.size();
	}
	dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
		corpus.num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
	dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

	dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

	// Find the used trigrams.
	vector<uint32_t> all_trigrams;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (corpus.seen_trigram(trgm)) {
			all_trigrams.push_back(trgm);
		}
	}

	// Create the hash table.
	unique_ptr<Trigram[]> hashtable;
	uint32_t ht_size = next_prime(all_trigrams.size());
	for (;;) {
		hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
		if (hashtable == nullptr) {
			dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
			ht_size = next_prime(ht_size * 1.05);
		} else {
			dprintf("Created hash table of size %u.\n\n", ht_size);
			break;
		}
	}

	// Find the offsets for each posting list.
	size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
	uint64_t offset = ftell(outfp) + bytes_for_hashtable;
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		hashtable[i].offset = offset;  // Needs to be there even for empty slots.
		if (hashtable[i].num_docids == 0) {
			continue;
		}

		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		offset += encoded.size();
	}

	// Write the hash table.
	hdr.hash_table_offset_bytes = ftell(outfp);
	hdr.hashtable_size = ht_size;
	fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

	// Write the actual posting lists.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		if (hashtable[i].num_docids == 0) {
			continue;
		}
		const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
		fwrite(encoded.data(), encoded.size(), 1, outfp);
	}

	// Rewind, and write the updated header.
	hdr.version = 0;
	fseek(outfp, 0, SEEK_SET);
	fwrite(&hdr, sizeof(hdr), 1, outfp);
	fclose(outfp);

	size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

	dprintf("Block size:     %7d files\n", block_size);
	dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
	dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
	dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
	dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
	dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
	dprintf("\n");
}

int main(int argc, char **argv)
{
	if (argc < 3) {
		fprintf(stderr, "Usage: %s MLOCATE_DB OUTPUT_DB\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	do_build(argv[1], argv[2], 32);
	exit(EXIT_SUCCESS);
}