// plocate-build.cpp
#include "db.h"
#include "vp4.h"

#include <algorithm>
#include <arpa/inet.h>
#include <assert.h>
#include <chrono>
#include <endian.h>
#include <fcntl.h>
#include <math.h>
#include <memory>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <string_view>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_map>
#include <utility>
#include <vector>
#include <zstd.h>

#define P4NENC_BOUND(n) (((n) + 127) / 128 + ((n) + 32) * sizeof(uint32_t))
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, string *tempbuf);

static inline uint32_t read_unigram(const string_view s, size_t idx)
{
        if (idx < s.size()) {
                return (unsigned char)s[idx];
        } else {
                return 0;
        }
}

static inline uint32_t read_trigram(const string_view s, size_t start)
{
        return read_unigram(s, start) |
                (read_unigram(s, start + 1) << 8) |
                (read_unigram(s, start + 2) << 16);
}
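
// Worked example of the packing above: for s = "abc" ('a' = 0x61, 'b' = 0x62,
// 'c' = 0x63), read_trigram(s, 0) = 0x61 | (0x62 << 8) | (0x63 << 16) = 0x636261,
// i.e., the first character lands in the least significant byte. Reading past
// the end of the string pads with zero bytes.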

enum {
        DBE_NORMAL = 0, /* A non-directory file */
        DBE_DIRECTORY = 1, /* A directory */
        DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
        uint8_t magic[8];
        uint32_t conf_size;
        uint8_t version;
        uint8_t check_visibility;
        uint8_t pad[2];
};

// From mlocate.
struct db_directory {
        uint64_t time_sec;
        uint32_t time_nsec;
        uint8_t pad[4];
};

class PostingListBuilder {
public:
        void add_docid(uint32_t docid);
        void finish();

        string encoded;
        size_t num_docids = 0;

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_docids;

        uint32_t last_block_end;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (pending_docids.empty()) {
                if (encoded.empty()) {
                        // Very first docid.
                        write_header(docid);
                        ++num_docids;
                        last_block_end = docid;
                        return;
                } else if (docid == last_block_end) {
                        return;
                }
        } else {
                if (docid == pending_docids.back()) {
                        return;
                }
        }

        pending_docids.push_back(docid);
        if (pending_docids.size() == 128) {
                append_block();
                pending_docids.clear();
                last_block_end = docid;
        }
        ++num_docids;
}

void PostingListBuilder::finish()
{
        if (pending_docids.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = p4d1enc32(pending_docids.data(), pending_docids.size(), buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_docids.size() == 128);
        unsigned char *end = p4d1enc128v32(pending_docids.data(), 128, buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        size_t bytes = p4nd1enc128v32(&docid, 1, buf);
        encoded.append(reinterpret_cast<char *>(buf), bytes);
}
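
// Layout of `encoded` as produced by the methods above (derived from the code,
// not an external spec): a single-docid header written with p4nd1enc128v32(),
// then zero or more full 128-docid blocks from p4d1enc128v32() and at most one
// partial tail block from p4d1enc32(); the non-header blocks are delta-coded
// against the last docid of the previous block (last_block_end).
// Illustrative use of the builder (a sketch, not called anywhere in this file):
//
//        PostingListBuilder pl;
//        for (uint32_t docid : { 1, 1, 4, 7 }) {  // The duplicate 1 is dropped.
//                pl.add_docid(docid);
//        }
//        pl.finish();
//        // pl.encoded now holds the header block for docid 1 followed by one
//        // partial block encoding { 4, 7 }, delta-coded against 1.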

class Corpus {
public:
        Corpus(size_t block_size)
                : block_size(block_size) {}
        void add_file(string filename);
        void flush_block();

        vector<string> filename_blocks;
        unordered_map<uint32_t, PostingListBuilder> invindex;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;

private:
        string current_block;
        string tempbuf;
        const size_t block_size;
};

void Corpus::add_file(string filename)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

void Corpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        while (ptr < current_block.c_str() + current_block.size()) {
                string_view s(ptr);
                if (s.size() >= 3) {
                        for (size_t j = 0; j < s.size() - 2; ++j) {
                                uint32_t trgm = read_trigram(s, j);
                                invindex[trgm].add_docid(docid);
                        }
                }
                ptr += s.size() + 1;
        }

        // Compress and add the filename block.
        filename_blocks.push_back(zstd_compress(current_block, &tempbuf));

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

const char *handle_directory(const char *ptr, Corpus *corpus)
{
        ptr += sizeof(db_directory);

        string dir_path = ptr;
        ptr += dir_path.size() + 1;
        if (dir_path == "/") {
                dir_path = "";
        }

        for (;;) {
                uint8_t type = *ptr++;
                if (type == DBE_NORMAL) {
                        string filename = ptr;
                        corpus->add_file(dir_path + "/" + filename);
                        ptr += filename.size() + 1;
                } else if (type == DBE_DIRECTORY) {
                        string dirname = ptr;
                        corpus->add_file(dir_path + "/" + dirname);
                        ptr += dirname.size() + 1;
                } else {
                        return ptr;
                }
        }
}
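
// The record format consumed above, as this parser understands it: a struct
// db_directory header (timestamps), the NUL-terminated directory path, and then
// a sequence of entries, each a one-byte type (DBE_NORMAL or DBE_DIRECTORY)
// followed by a NUL-terminated name, terminated by a DBE_END byte with no name.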

void read_mlocate(const char *filename, Corpus *corpus)
{
        int fd = open(filename, O_RDONLY);
        if (fd == -1) {
                perror(filename);
                exit(1);
        }
        off_t len = lseek(fd, 0, SEEK_END);
        if (len == -1) {
                perror("lseek");
                exit(1);
        }
        const char *data = (char *)mmap(nullptr, len, PROT_READ, MAP_SHARED, fd, /*offset=*/0);
        if (data == MAP_FAILED) {
                perror("mmap");
                exit(1);
        }

        const db_header *hdr = (const db_header *)data;

        // TODO: Care about the base path.
        string path = data + sizeof(db_header);
        uint64_t offset = sizeof(db_header) + path.size() + 1 + ntohl(hdr->conf_size);

        const char *ptr = data + offset;
        while (ptr < data + len) {
                ptr = handle_directory(ptr, corpus);
        }

        munmap((void *)data, len);
        close(fd);
}
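
// Overall file layout assumed by read_mlocate(), as implied by the offset
// arithmetic above: a struct db_header, the NUL-terminated root path, a
// configuration block of ntohl(hdr->conf_size) bytes (skipped here), and then
// back-to-back directory records until end of file.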

string zstd_compress(const string &src, string *tempbuf)
{
        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size = ZSTD_compress(&(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        return string(tempbuf->data(), size);
}
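
// For reference, a minimal sketch of the matching read-side step (not part of
// this tool; the plocate query side would do the decompression). It assumes the
// block was produced by ZSTD_compress() above, so the frame header carries the
// content size and ZSTD_getFrameContentSize() can recover it (error checking
// omitted):
//
//        string zstd_decompress(const string &src)
//        {
//                unsigned long long uncompressed_size = ZSTD_getFrameContentSize(src.data(), src.size());
//                string dst;
//                dst.resize(uncompressed_size);
//                size_t ret = ZSTD_decompress(&dst[0], dst.size(), src.data(), src.size());
//                dst.resize(ret);
//                return dst;
//        }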

// Note: returns false for 2 and 3, so next_prime() below never returns those;
// that is harmless for the hash table sizes used here.
bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}
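
// Worked example: next_prime(49) tries 49 (divisible by 7), then 51
// (divisible by 3), and returns 53.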

unique_ptr<Trigram[]> create_hashtable(const Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.invindex.find(trgm)->second.num_docids), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}
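
// How the table is meant to be probed (a sketch of the reader side, derived
// from the insertion logic above; hash_trigram() is presumably provided by
// db.h): start at hash_trigram(trgm, ht_size) and scan forward linearly.
// Because insertion never displaces an element more than num_overflow_slots
// past its home bucket, a lookup can stop after that many steps (or at the
// first empty slot) and report the trigram as absent:
//
//        const Trigram *find_trigram(const Trigram *ht, uint32_t trgm,
//                                    uint32_t ht_size, uint32_t num_overflow_slots)
//        {
//                uint32_t bucket = hash_trigram(trgm, ht_size);
//                for (unsigned distance = 0; distance <= num_overflow_slots; ++distance) {
//                        if (ht[bucket + distance].trgm == trgm) {
//                                return &ht[bucket + distance];
//                        }
//                }
//                return nullptr;
//        }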

void do_build(const char *infile, const char *outfile, int block_size)
{
        steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

        Corpus corpus(block_size);

        read_mlocate(infile, &corpus);
        if (false) {  // To read a plain text file.
                FILE *fp = fopen(infile, "r");
                while (!feof(fp)) {
                        char buf[1024];
                        if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
                                break;
                        }
                        string s(buf);
                        if (!s.empty() && s.back() == '\n')
                                s.pop_back();
                        corpus.add_file(move(s));
                }
                fclose(fp);
        }
        corpus.flush_block();
        dprintf("Read %zu files from %s\n", corpus.num_files, infile);

        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_used = 0;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
                trigrams += pl_builder.num_docids;
                bytes_used += pl_builder.encoded.size();
        }
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus.num_files, corpus.invindex.size(), trigrams, double(trigrams) / corpus.invindex.size(), longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_used, 8 * bytes_used / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        // Sort the trigrams, mostly to get a consistent result every time
        // (the hash table will put things in random order anyway).
        vector<uint32_t> all_trigrams;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                all_trigrams.push_back(trigram);
        }
        sort(all_trigrams.begin(), all_trigrams.end());

        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        constexpr unsigned num_overflow_slots = 16;
        for (;;) {
                hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        umask(0027);
        FILE *outfp = fopen(outfile, "wb");
        if (outfp == nullptr) {
                perror(outfile);
                exit(1);
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = sizeof(Header) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const PostingListBuilder &pl_builder = corpus.invindex[hashtable[i].trgm];
                offset += pl_builder.encoded.size();
        }

        // Write the header.
        Header hdr;
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = 0;
        hdr.hashtable_size = ht_size;
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.hash_table_offset_bytes = sizeof(hdr);  // This member is just there for flexibility.
        hdr.filename_index_offset_bytes = offset;
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        // Write the hash table.
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        size_t bytes_for_posting_lists = 0;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const string &encoded = corpus.invindex[hashtable[i].trgm].encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
                bytes_for_posting_lists += encoded.size();
        }

        // Stick an empty block at the end as sentinel.
        corpus.filename_blocks.push_back("");

        // Write the offsets to the filenames.
        size_t bytes_for_filename_index = 0, bytes_for_filenames = 0;
        offset = hdr.filename_index_offset_bytes + corpus.filename_blocks.size() * sizeof(offset);
        for (const string &filename : corpus.filename_blocks) {
                fwrite(&offset, sizeof(offset), 1, outfp);
                offset += filename.size();
                bytes_for_filename_index += sizeof(offset);
                bytes_for_filenames += filename.size();
        }

        // Write the actual filenames.
        for (const string &filename : corpus.filename_blocks) {
                fwrite(filename.data(), filename.size(), 1, outfp);
        }

        fclose(outfp);

        size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}

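// Typical invocation (paths are illustrative): read an existing mlocate
// database and write the plocate index, e.g.
//
//        plocate-build /var/lib/mlocate/mlocate.db /var/lib/mlocate/plocate.db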
int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "Usage: plocate-build MLOCATE_DB PLOCATE_DB\n");
                exit(1);
        }
        do_build(argv[1], argv[2], 32);
        exit(EXIT_SUCCESS);
}