// plocate-build.cpp
#include "db.h"
#include "vp4.h"

#include <algorithm>
#include <arpa/inet.h>
#include <assert.h>
#include <chrono>
#include <endian.h>
#include <fcntl.h>
#include <math.h>
#include <memory>
#include <stdio.h>
#include <string.h>
#include <string>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_map>
#include <utility>
#include <vector>
#include <zstd.h>

#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

static inline uint32_t read_unigram(const string_view s, size_t idx)
{
        if (idx < s.size()) {
                return (unsigned char)s[idx];
        } else {
                return 0;
        }
}

static inline uint32_t read_trigram(const string_view s, size_t start)
{
        return read_unigram(s, start) |
                (read_unigram(s, start + 1) << 8) |
                (read_unigram(s, start + 2) << 16);
}
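
// A trigram is the three consecutive bytes s[start..start+2] packed into the
// low 24 bits of a uint32_t, lowest byte first; reads past the end of the
// string yield 0. Filenames shorter than three bytes produce no trigrams
// (see Corpus::flush_block()).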

enum {
        DBE_NORMAL = 0, /* A non-directory file */
        DBE_DIRECTORY = 1, /* A directory */
        DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
        uint8_t magic[8];
        uint32_t conf_size;
        uint8_t version;
        uint8_t check_visibility;
        uint8_t pad[2];
};

// From mlocate.
struct db_directory {
        uint64_t time_sec;
        uint32_t time_nsec;
        uint8_t pad[4];
};

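// Builds the posting list for one trigram. The list starts with the first
// docid, stored on its own via p4nd1enc128v32() (write_header()); after that,
// every full group of 128 docids is delta-encoded relative to last_block_end
// (the previous block's final docid) with the interleaved codec
// p4d1enc128v32() (append_block()), and any final partial block is flushed
// with plain p4d1enc32() in finish(). Consecutive duplicate docids are
// dropped in add_docid().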
class PostingListBuilder {
public:
        void add_docid(uint32_t docid);
        void finish();

        string encoded;
        size_t num_docids = 0;

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_docids;

        uint32_t last_block_end;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (pending_docids.empty()) {
                if (encoded.empty()) {
                        // Very first docid.
                        write_header(docid);
                        ++num_docids;
                        last_block_end = docid;
                        return;
                } else if (docid == last_block_end) {
                        return;
                }
        } else {
                if (docid == pending_docids.back()) {
                        return;
                }
        }

        pending_docids.push_back(docid);
        if (pending_docids.size() == 128) {
                append_block();
                pending_docids.clear();
                last_block_end = docid;
        }
        ++num_docids;
}

void PostingListBuilder::finish()
{
        if (pending_docids.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = p4d1enc32(pending_docids.data(), pending_docids.size(), buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_docids.size() == 128);
        unsigned char *end = p4d1enc128v32(pending_docids.data(), 128, buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        size_t bytes = p4nd1enc128v32(&docid, 1, buf);
        encoded.append(reinterpret_cast<char *>(buf), bytes);
}

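// Accumulates filenames into blocks of block_size names each. A block is the
// NUL-separated concatenation of its filenames; when full, it is
// zstd-compressed and written to the output file right away, and its starting
// byte offset is recorded in filename_blocks. The block index doubles as the
// docid stored in the trigram posting lists.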
class Corpus {
public:
        Corpus(FILE *outfp, size_t block_size)
                : outfp(outfp), block_size(block_size) {}
        void add_file(string filename);
        void flush_block();

        vector<uint64_t> filename_blocks;
        unordered_map<uint32_t, PostingListBuilder> invindex;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;

private:
        FILE *outfp;
        string current_block;
        string tempbuf;
        const size_t block_size;
};

void Corpus::add_file(string filename)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

void Corpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        while (ptr < current_block.c_str() + current_block.size()) {
                string_view s(ptr);
                if (s.size() >= 3) {
                        for (size_t j = 0; j < s.size() - 2; ++j) {
                                uint32_t trgm = read_trigram(s, j);
                                invindex[trgm].add_docid(docid);
                        }
                }
                ptr += s.size() + 1;
        }

        // Compress and add the filename block.
        filename_blocks.push_back(ftell(outfp));
        string compressed = zstd_compress(current_block, &tempbuf);
        if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
                perror("fwrite()");
                exit(1);
        }

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

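// Parses one directory record from the mmap'ed mlocate database: a
// db_directory header, the NUL-terminated directory path, then a sequence of
// (type byte, NUL-terminated name) entries terminated by DBE_END. Every entry
// (files and subdirectories alike) is added to the corpus as dir_path + "/" +
// name. Returns a pointer to just past the record.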
const char *handle_directory(const char *ptr, Corpus *corpus)
{
        ptr += sizeof(db_directory);

        string dir_path = ptr;
        ptr += dir_path.size() + 1;
        if (dir_path == "/") {
                dir_path = "";
        }

        for (;;) {
                uint8_t type = *ptr++;
                if (type == DBE_NORMAL) {
                        string filename = ptr;
                        corpus->add_file(dir_path + "/" + filename);
                        ptr += filename.size() + 1;
                } else if (type == DBE_DIRECTORY) {
                        string dirname = ptr;
                        corpus->add_file(dir_path + "/" + dirname);
                        ptr += dirname.size() + 1;
                } else {
                        return ptr;
                }
        }
}

void read_mlocate(const char *filename, Corpus *corpus)
{
        int fd = open(filename, O_RDONLY);
        if (fd == -1) {
                perror(filename);
                exit(1);
        }
        off_t len = lseek(fd, 0, SEEK_END);
        if (len == -1) {
                perror("lseek");
                exit(1);
        }
        const char *data = (char *)mmap(nullptr, len, PROT_READ, MAP_SHARED, fd, /*offset=*/0);
        if (data == MAP_FAILED) {
                perror("mmap");
                exit(1);
        }

        const db_header *hdr = (const db_header *)data;

        // TODO: Care about the base path.
        string path = data + sizeof(db_header);
        uint64_t offset = sizeof(db_header) + path.size() + 1 + ntohl(hdr->conf_size);

        const char *ptr = data + offset;
        while (ptr < data + len) {
                ptr = handle_directory(ptr, corpus);
        }

        munmap((void *)data, len);
        close(fd);
}

string zstd_compress(const string &src, string *tempbuf)
{
        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size = ZSTD_compress(&(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        if (ZSTD_isError(size)) {
                fprintf(stderr, "ZSTD_compress(): %s\n", ZSTD_getErrorName(size));
                exit(1);
        }
        return string(tempbuf->data(), size);
}

// Simple trial division; this only runs a handful of times per build, so speed
// is irrelevant. (2 and 3 are misclassified as composite, which at worst makes
// next_prime() return a slightly larger table size.)
bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}

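// Lays the trigrams out into an open-addressed hash table with linear probing
// and Robin Hood insertion (an entry being inserted may displace a resident
// that sits closer to its own home bucket, which keeps the longest probe
// distance short). The table has ht_size buckets plus num_overflow_slots
// extra slots at the end, so probing never wraps around; if an insertion
// would probe past the overflow area, we return nullptr and the caller
// retries with a larger table. Posting-list offsets are filled in later,
// once they are known.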
unique_ptr<Trigram[]> create_hashtable(const Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.invindex.find(trgm)->second.num_docids), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}

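// Writes the output file in this order: the Header (first with version = -1 so
// a crash mid-build leaves an obviously broken file, rewritten with version = 0
// at the very end), then the zstd-compressed filename blocks as they are read,
// then the filename index (one uint64_t byte offset per block, plus a
// sentinel), then the trigram hash table, and finally the posting lists.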
void do_build(const char *infile, const char *outfile, int block_size)
{
        steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

        umask(0027);
        FILE *outfp = fopen(outfile, "wb");
        if (outfp == nullptr) {
                perror(outfile);
                exit(1);
        }

        // Write the header.
        Header hdr;
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = -1;  // Mark as broken.
        hdr.hashtable_size = 0;  // Not known yet.
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
        hdr.filename_index_offset_bytes = -1;
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        Corpus corpus(outfp, block_size);

        read_mlocate(infile, &corpus);
        if (false) {  // To read a plain text file.
                FILE *fp = fopen(infile, "r");
                while (!feof(fp)) {
                        char buf[1024];
                        if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
                                break;
                        }
                        string s(buf);
                        if (s.back() == '\n')
                                s.pop_back();
                        corpus.add_file(move(s));
                }
                fclose(fp);
        }
        corpus.flush_block();
        dprintf("Read %zu files from %s\n", corpus.num_files, infile);

        // Stick an empty block at the end as sentinel.
        corpus.filename_blocks.push_back(ftell(outfp));
        const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();

        // Write the offsets to the filenames.
        hdr.filename_index_offset_bytes = ftell(outfp);
        const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
        fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
        corpus.filename_blocks.clear();
        corpus.filename_blocks.shrink_to_fit();

        // Finish up encoding the posting lists.
        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_for_posting_lists = 0;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
                trigrams += pl_builder.num_docids;
                bytes_for_posting_lists += pl_builder.encoded.size();
        }
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus.num_files, corpus.invindex.size(), trigrams, double(trigrams) / corpus.invindex.size(), longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        // Sort the trigrams, mostly to get a consistent result every time
        // (the hash table will put things in random order anyway).
        vector<uint32_t> all_trigrams;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                all_trigrams.push_back(trigram);
        }
        sort(all_trigrams.begin(), all_trigrams.end());

        // Create the hash table.
        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        for (;;) {
                hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = ftell(outfp) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const string &encoded = corpus.invindex[hashtable[i].trgm].encoded;
                offset += encoded.size();
        }

        // Write the hash table.
        hdr.hash_table_offset_bytes = ftell(outfp);
        hdr.hashtable_size = ht_size;
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const string &encoded = corpus.invindex[hashtable[i].trgm].encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
        }

        // Rewind, and write the updated header.
        hdr.version = 0;
        fseek(outfp, 0, SEEK_SET);
        fwrite(&hdr, sizeof(hdr), 1, outfp);
        fclose(outfp);

        size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}

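// Usage: plocate-build MLOCATE_DB OUTPUT_FILE
// e.g. (illustrative paths): plocate-build /var/lib/mlocate/mlocate.db plocate.db
// The block size (32 filenames per compressed block) is currently hard-coded.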
int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "Usage: %s MLOCATE_DB OUTPUT_FILE\n", argv[0]);
                exit(EXIT_FAILURE);
        }
        do_build(argv[1], argv[2], 32);
        exit(EXIT_SUCCESS);
}