plocate-build.cpp
1 #include "db.h"
2 #include "vp4.h"
3
4 #include <algorithm>
5 #include <arpa/inet.h>
6 #include <assert.h>
7 #include <chrono>
8 #include <endian.h>
9 #include <fcntl.h>
10 #include <math.h>
11 #include <memory>
12 #include <stdio.h>
13 #include <string.h>
14 #include <string>
15 #include <sys/stat.h>
16 #include <sys/types.h>
17 #include <unistd.h>
18 #include <unordered_map>
19 #include <vector>
20 #include <zstd.h>
21
22 #define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))
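// Worst-case output size when TurboPFor-encoding n 32-bit values; used to
// size the temporary buffers passed to the p4*enc32 calls below. (The exact
// expression presumably follows TurboPFor's recommended output-buffer bound.)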
#define dprintf(...)
//#define dprintf(...) fprintf(stderr, __VA_ARGS__);

using namespace std;
using namespace std::chrono;

string zstd_compress(const string &src, string *tempbuf);

constexpr unsigned num_overflow_slots = 16;

static inline uint32_t read_unigram(const string_view s, size_t idx)
{
        if (idx < s.size()) {
                return (unsigned char)s[idx];
        } else {
                return 0;
        }
}

static inline uint32_t read_trigram(const string_view s, size_t start)
{
        return read_unigram(s, start) |
                (read_unigram(s, start + 1) << 8) |
                (read_unigram(s, start + 2) << 16);
}
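// A trigram is three consecutive bytes of a filename packed into the low
// 24 bits of a uint32_t (first byte in the lowest bits), zero-padded past
// the end of the string. These values are the keys of the inverted index
// built below, and presumably have to match how plocate forms trigrams
// from the query string at lookup time.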

enum {
        DBE_NORMAL = 0, /* A non-directory file */
        DBE_DIRECTORY = 1, /* A directory */
        DBE_END = 2 /* End of directory contents; contains no name */
};

// From mlocate.
struct db_header {
        uint8_t magic[8];
        uint32_t conf_size;
        uint8_t version;
        uint8_t check_visibility;
        uint8_t pad[2];
};

// From mlocate.
struct db_directory {
        uint64_t time_sec;
        uint32_t time_nsec;
        uint8_t pad[4];
};
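// Sketch of the mlocate database layout as consumed here (assumption, based
// on the mlocate.db(5) description): the file starts with db_header, then a
// NUL-terminated database root path, then a configuration block of conf_size
// bytes, and then one db_directory record per directory, each followed by its
// NUL-terminated path and a list of (type byte, NUL-terminated name) entries
// terminated by DBE_END. Integer fields are stored big-endian.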

class PostingListBuilder {
public:
        void add_docid(uint32_t docid);
        void finish();

        string encoded;
        size_t num_docids = 0;

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_docids;

        uint32_t last_block_end;
};
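// The encoded posting list consists of the very first docid stored as a
// one-element TurboPFor block (write_header), followed by delta-encoded
// blocks of exactly 128 docids in the SIMD-interleaved format (append_block),
// and finally an optional non-interleaved partial block (finish). The reader
// presumably uses num_docids, which ends up in the hash table entry, to know
// how many values to decode.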

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (pending_docids.empty()) {
                if (encoded.empty()) {
                        // Very first docid.
                        write_header(docid);
                        ++num_docids;
                        last_block_end = docid;
                        return;
                } else if (docid == last_block_end) {
                        return;
                }
        } else {
                if (docid == pending_docids.back()) {
                        return;
                }
        }

        pending_docids.push_back(docid);
        if (pending_docids.size() == 128) {
                append_block();
                pending_docids.clear();
                last_block_end = docid;
        }
        ++num_docids;
}

void PostingListBuilder::finish()
{
        if (pending_docids.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = p4d1enc32(pending_docids.data(), pending_docids.size(), buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_docids.size() == 128);
        unsigned char *end = p4d1enc128v32(pending_docids.data(), 128, buf, last_block_end);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        size_t bytes = p4nd1enc128v32(&docid, 1, buf);
        encoded.append(reinterpret_cast<char *>(buf), bytes);
}

class Corpus {
public:
        Corpus(FILE *outfp, size_t block_size)
                : outfp(outfp), block_size(block_size) {}
        void add_file(string filename);
        void flush_block();

        vector<uint64_t> filename_blocks;
        unordered_map<uint32_t, PostingListBuilder> invindex;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;

private:
        FILE *outfp;
        string current_block;
        string tempbuf;
        const size_t block_size;
};
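// Filenames are accumulated into blocks of block_size entries, each of which
// is zstd-compressed and written to the output file as soon as it is full.
// A docid in the inverted index is simply the index of such a block, so a
// trigram hit presumably only tells plocate which compressed block to
// decompress and scan for the actual match.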

void Corpus::add_file(string filename)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

void Corpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        while (ptr < current_block.c_str() + current_block.size()) {
                string_view s(ptr);
                if (s.size() >= 3) {
                        for (size_t j = 0; j < s.size() - 2; ++j) {
                                uint32_t trgm = read_trigram(s, j);
                                invindex[trgm].add_docid(docid);
                        }
                }
                ptr += s.size() + 1;
        }

        // Compress and add the filename block.
        filename_blocks.push_back(ftell(outfp));
        string compressed = zstd_compress(current_block, &tempbuf);
        if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
                perror("fwrite()");
                exit(1);
        }

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

string read_cstr(FILE *fp)
{
        string ret;
        for ( ;; ) {
                int ch = getc(fp);
                if (ch == -1) {
                        perror("getc");
                        exit(1);
                }
                if (ch == 0) {
                        return ret;
                }
                ret.push_back(ch);
        }
}

void handle_directory(FILE *fp, Corpus *corpus)
{
        db_directory dummy;
        if (fread(&dummy, sizeof(dummy), 1, fp) != 1) {
                if (feof(fp)) {
                        return;
                } else {
                        perror("fread");
                        exit(1);
                }
        }

        string dir_path = read_cstr(fp);
        if (dir_path == "/") {
                dir_path = "";
        }

        for (;;) {
                int type = getc(fp);
                if (type == DBE_NORMAL) {
                        string filename = read_cstr(fp);
                        corpus->add_file(dir_path + "/" + filename);
                } else if (type == DBE_DIRECTORY) {
                        string dirname = read_cstr(fp);
                        corpus->add_file(dir_path + "/" + dirname);
                } else {
                        return;  // Probably end.
                }
        }
}

void read_mlocate(const char *filename, Corpus *corpus)
{
        FILE *fp = fopen(filename, "rb");
        if (fp == nullptr) {
                perror(filename);
                exit(1);
        }

        db_header hdr;
        if (fread(&hdr, sizeof(hdr), 1, fp) != 1) {
                perror("short read");
                exit(1);
        }

        // TODO: Care about the base path.
        string path = read_cstr(fp);

        // Skip the configuration block that follows the root path; conf_size
        // is stored big-endian (assumption, based on the mlocate.db(5) layout
        // sketched above).
        fseek(fp, ntohl(hdr.conf_size), SEEK_CUR);

        while (!feof(fp)) {
                handle_directory(fp, corpus);
        }
        fclose(fp);
}

string zstd_compress(const string &src, string *tempbuf)
{
        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size = ZSTD_compress(&(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        if (ZSTD_isError(size)) {
                fprintf(stderr, "ZSTD_compress(): %s\n", ZSTD_getErrorName(size));
                exit(1);
        }
        return string(tempbuf->data(), size);
}

bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}
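// Note that this misclassifies the small inputs 1, 2 and 3 (1 is accepted,
// 2 and 3 are rejected), which is harmless here: it is only used to size the
// hash table from the (large) number of distinct trigrams.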

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}

unique_ptr<Trigram[]> create_hashtable(const Corpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.invindex.find(trgm)->second.num_docids), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}
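// The table uses open addressing with linear probing; Robin Hood insertion
// swaps the element being inserted with an already-placed element whenever
// the former is farther from its home bucket, which keeps the maximum probe
// length short. Probes may run up to num_overflow_slots past the nominal end
// of the table, presumably so that lookups never have to wrap around; if even
// that is not enough, we return nullptr and the caller retries with a larger
// table.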

void do_build(const char *infile, const char *outfile, int block_size)
{
        steady_clock::time_point start __attribute__((unused)) = steady_clock::now();

        umask(0027);
        FILE *outfp = fopen(outfile, "wb");
        if (outfp == nullptr) {
                perror(outfile);
                exit(1);
        }

        // Write the header.
        Header hdr;
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = -1;  // Mark as broken.
        hdr.hashtable_size = 0;  // Not known yet.
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
        hdr.filename_index_offset_bytes = -1;
        fwrite(&hdr, sizeof(hdr), 1, outfp);
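        // The header is written twice: this first copy only reserves the space
        // and is deliberately marked with version -1, so that a half-written
        // database is presumably rejected by plocate. Once all offsets are
        // known, do_build() seeks back to the start of the file and writes the
        // real header.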

        Corpus corpus(outfp, block_size);

        read_mlocate(infile, &corpus);
        if (false) {  // To read a plain text file.
                FILE *fp = fopen(infile, "r");
                while (!feof(fp)) {
                        char buf[1024];
                        if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
                                break;
                        }
                        string s(buf);
                        if (s.back() == '\n')
                                s.pop_back();
                        corpus.add_file(move(s));
                }
                fclose(fp);
        }
        corpus.flush_block();
        dprintf("Read %zu files from %s\n", corpus.num_files, infile);
        hdr.num_docids = corpus.filename_blocks.size();

        // Stick an empty block at the end as sentinel.
        corpus.filename_blocks.push_back(ftell(outfp));
        const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();
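        // With the sentinel in place, the compressed size of filename block i
        // can presumably be computed by the reader as
        // filename_blocks[i + 1] - filename_blocks[i].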

        // Write the offsets to the filenames.
        hdr.filename_index_offset_bytes = ftell(outfp);
        const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
        fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
        corpus.filename_blocks.clear();
        corpus.filename_blocks.shrink_to_fit();

        // Finish up encoding the posting lists.
        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_for_posting_lists = 0;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
                trigrams += pl_builder.num_docids;
                bytes_for_posting_lists += pl_builder.encoded.size();
        }
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus.num_files, corpus.invindex.size(), trigrams, double(trigrams) / corpus.invindex.size(), longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());

        // Sort the trigrams, mostly to get a consistent result every time
        // (the hash table will put things in random order anyway).
        vector<uint32_t> all_trigrams;
        for (auto &[trigram, pl_builder] : corpus.invindex) {
                all_trigrams.push_back(trigram);
        }
        sort(all_trigrams.begin(), all_trigrams.end());

        // Create the hash table.
        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        for (;;) {
                hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = ftell(outfp) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const string &encoded = corpus.invindex[hashtable[i].trgm].encoded;
                offset += encoded.size();
        }
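        // Giving empty slots a valid offset (and keeping a sentinel slot at
        // the end) presumably lets the reader compute each posting list's
        // length as hashtable[i + 1].offset - hashtable[i].offset.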

        // Write the hash table.
        hdr.hash_table_offset_bytes = ftell(outfp);
        hdr.hashtable_size = ht_size;
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const string &encoded = corpus.invindex[hashtable[i].trgm].encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
        }

        // Rewind, and write the updated header.
        hdr.version = 0;
        fseek(outfp, 0, SEEK_SET);
        fwrite(&hdr, sizeof(hdr), 1, outfp);
        fclose(outfp);

        size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "Usage: plocate-build MLOCATE_DB PLOCATE_DB\n");
                exit(1);
        }
        do_build(argv[1], argv[2], 32);
        exit(EXIT_SUCCESS);
}