#include "db.h"
#include "vp4.h"

#include <algorithm>
#include <arpa/inet.h>
#include <assert.h>
#include <chrono>
#include <endian.h>
#include <fcntl.h>
#include <math.h>
#include <memory>
#include <stdio.h>
#include <string.h>
#include <string>
#include <string_view>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_map>
#include <vector>
#include <zstd.h>

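// Upper bound (in bytes) on the output of the TurboPFor encoders below,
// used to size the temporary encode buffers.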
#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

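// Helpers for turning filenames into trigrams: read_unigram() returns one
// byte of the string (or 0 past the end), and read_trigram() packs three
// consecutive bytes into a single value, first byte in the low bits.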
static inline uint32_t read_unigram(const string_view s, size_t idx)
{
        if (idx < s.size()) {
                return (unsigned char)s[idx];
        } else {
                return 0;
        }
}

static inline uint32_t read_trigram(const string_view s, size_t start)
{
        return read_unigram(s, start) |
                (read_unigram(s, start + 1) << 8) |
                (read_unigram(s, start + 2) << 16);
}

enum {
        DBE_NORMAL = 0, /* A non-directory file */
        DBE_DIRECTORY = 1, /* A directory */
        DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
        uint8_t magic[8];
        uint32_t conf_size;
        uint8_t version;
        uint8_t check_visibility;
        uint8_t pad[2];
};

// From mlocate.
struct db_directory {
        uint64_t time_sec;
        uint32_t time_nsec;
        uint8_t pad[4];
};

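// Builds the encoded posting list for a single trigram. The first docid is
// written on its own by write_header(); subsequent docids are buffered and
// encoded with TurboPFor in blocks of 128, each block coded relative to the
// last docid of the previous one (last_block_end).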
class PostingListBuilder {
public:
        inline void add_docid(uint32_t docid);
        void finish();

        string encoded;
        size_t num_docids = 0;

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_docids;

        uint32_t last_block_end, last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (docid == last_docid) {
                return;
        }

        if (num_docids == 0) {
                // Very first docid.
                write_header(docid);
                ++num_docids;
                last_block_end = last_docid = docid;
                return;
        }

        last_docid = docid;
        pending_docids.push_back(docid);
        if (pending_docids.size() == 128) {
                append_block();
                pending_docids.clear();
                last_block_end = docid;
        }
        ++num_docids;
}

void PostingListBuilder::finish()
{
        if (pending_docids.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = p4d1enc32(pending_docids.data(), pending_docids.size(), buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_docids.size() == 128);
        unsigned char *end = p4d1enc32(pending_docids.data(), 128, buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        size_t bytes = p4nd1enc32(&docid, 1, buf);
        encoded.append(reinterpret_cast<char *>(buf), bytes);
}

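// The whole index under construction: a lazily allocated posting list
// builder per trigram, plus the current block of filenames, which is
// zstd-compressed and appended to outfp every block_size files.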
class Corpus {
public:
        Corpus(FILE *outfp, size_t block_size)
                // Note the trailing () so that the array is value-initialized (all nullptr).
                : invindex(new PostingListBuilder*[NUM_TRIGRAMS]()), outfp(outfp), block_size(block_size) {}
        void add_file(string filename);
        void flush_block();

        vector<uint64_t> filename_blocks;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
        bool seen_trigram(uint32_t trgm) {
                return invindex[trgm] != nullptr;
        }
        PostingListBuilder& get_pl_builder(uint32_t trgm) {
                if (invindex[trgm] == nullptr) {
                        invindex[trgm] = new PostingListBuilder;
                }
                return *invindex[trgm];
        }

private:
        unique_ptr<PostingListBuilder*[]> invindex;
        FILE *outfp;
        string current_block;
        string tempbuf;
        const size_t block_size;
};

void Corpus::add_file(string filename)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

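// Add all trigrams of the filenames in the current block to the inverted
// index, then compress the block and write it to the output file.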
void Corpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        while (ptr < current_block.c_str() + current_block.size()) {
                string_view s(ptr);
                if (s.size() >= 3) {
                        for (size_t j = 0; j < s.size() - 2; ++j) {
                                uint32_t trgm = read_trigram(s, j);
                                get_pl_builder(trgm).add_docid(docid);
                        }
                }
                ptr += s.size() + 1;
        }

        // Compress and add the filename block.
        filename_blocks.push_back(ftell(outfp));
        string compressed = zstd_compress(current_block, &tempbuf);
        if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
                perror("fwrite()");
                exit(1);
        }

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

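// Read a NUL-terminated string from fp; aborts on read error or EOF.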
string read_cstr(FILE *fp)
{
        string ret;
        for ( ;; ) {
                int ch = getc(fp);
                if (ch == -1) {
                        perror("getc");
                        exit(1);
                }
                if (ch == 0) {
                        return ret;
                }
                ret.push_back(ch);
        }
}

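// Read a single directory record from the mlocate database and add its
// entries (both files and subdirectories) to the corpus.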
void handle_directory(FILE *fp, Corpus *corpus)
{
        db_directory dummy;
        if (fread(&dummy, sizeof(dummy), 1, fp) != 1) {
                if (feof(fp)) {
                        return;
                } else {
                        perror("fread");
                        exit(1);
                }
        }

        string dir_path = read_cstr(fp);
        if (dir_path == "/") {
                dir_path = "";
        }

        for (;;) {
                int type = getc(fp);
                if (type == DBE_NORMAL) {
                        string filename = read_cstr(fp);
                        corpus->add_file(dir_path + "/" + filename);
                } else if (type == DBE_DIRECTORY) {
                        string dirname = read_cstr(fp);
                        corpus->add_file(dir_path + "/" + dirname);
                } else {
                        return;  // Probably end.
                }
        }
}

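// Read an mlocate database: the header, the base path, and then one record
// per directory, feeding every entry into the corpus.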
void read_mlocate(const char *filename, Corpus *corpus)
{
        FILE *fp = fopen(filename, "rb");
        if (fp == nullptr) {
                perror(filename);
                exit(1);
        }

        db_header hdr;
        if (fread(&hdr, sizeof(hdr), 1, fp) != 1) {
                perror("short read");
                exit(1);
        }

        // TODO: Care about the base path.
        string path = read_cstr(fp);
        while (!feof(fp)) {
                handle_directory(fp, corpus);
        }
        fclose(fp);
}

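// zstd-compress src at level 6, reusing *tempbuf as scratch space so that we
// do not reallocate the compression buffer for every block.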
string zstd_compress(const string &src, string *tempbuf)
{
        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size = ZSTD_compress(&(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        return string(tempbuf->data(), size);
}

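// Simple trial-division primality test; together with next_prime() below,
// it is only used to pick a prime size for the trigram hash table.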
bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}

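// Place all used trigrams in an open-addressing hash table using Robin Hood
// probing. Returns nullptr if some element would need more than
// num_overflow_slots probes; the caller then retries with a larger table.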
unique_ptr<Trigram[]> create_hashtable(Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).num_docids), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}

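// Build the index: write a provisional header, stream the compressed filename
// blocks while reading the input, then write the filename index, the trigram
// hash table and the posting lists, and finally seek back and write the real
// header.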
void do_build(const char *infile, const char *outfile, int block_size)
{
        steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

        umask(0027);
        FILE *outfp = fopen(outfile, "wb");

        // Write the header.
        Header hdr;
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = -1;  // Mark as broken.
        hdr.hashtable_size = 0;  // Not known yet.
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
        hdr.filename_index_offset_bytes = -1;
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        Corpus corpus(outfp, block_size);

        read_mlocate(infile, &corpus);
        if (false) {  // To read a plain text file.
                FILE *fp = fopen(infile, "r");
                while (!feof(fp)) {
                        char buf[1024];
                        if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
                                break;
                        }
                        string s(buf);
                        if (s.back() == '\n')
                                s.pop_back();
                        corpus.add_file(move(s));
                }
                fclose(fp);
        }
        corpus.flush_block();
        dprintf("Read %zu files from %s\n", corpus.num_files, infile);
        hdr.num_docids = corpus.filename_blocks.size();

        // Stick an empty block at the end as sentinel.
        corpus.filename_blocks.push_back(ftell(outfp));
        const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();

        // Write the offsets to the filenames.
        hdr.filename_index_offset_bytes = ftell(outfp);
        const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
        fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
        corpus.filename_blocks.clear();
        corpus.filename_blocks.shrink_to_fit();

        // Finish up encoding the posting lists.
        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_for_posting_lists = 0;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (!corpus.seen_trigram(trgm)) continue;
                PostingListBuilder &pl_builder = corpus.get_pl_builder(trgm);
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
                trigrams += pl_builder.num_docids;
                bytes_for_posting_lists += pl_builder.encoded.size();
        }
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus.num_files, corpus.invindex.size(), trigrams, double(trigrams) / corpus.invindex.size(), longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        // Find the used trigrams.
        vector<uint32_t> all_trigrams;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (corpus.seen_trigram(trgm)) {
                        all_trigrams.push_back(trgm);
                }
        }

        // Create the hash table.
        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        for (;;) {
                hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = ftell(outfp) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
                offset += encoded.size();
        }

        // Write the hash table.
        hdr.hash_table_offset_bytes = ftell(outfp);
        hdr.hashtable_size = ht_size;
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
        }

        // Rewind, and write the updated header.
        hdr.version = 0;
        fseek(outfp, 0, SEEK_SET);
        fwrite(&hdr, sizeof(hdr), 1, outfp);
        fclose(outfp);

        size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}

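// Expects the input mlocate database as argv[1] and the output plocate
// database as argv[2].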
int main(int argc, char **argv)
{
        do_build(argv[1], argv[2], 32);
        exit(EXIT_SUCCESS);
}