Proof-of-concept of using ICU for strength-zero searches.
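As background for this change (not part of the file below), here is a minimal, hypothetical sketch of what a primary-strength ("strength zero") comparison does in ICU: case and accent differences are ignored. The en_US locale and the example strings are chosen purely for illustration.

	#include <stdio.h>
	#include <unicode/coll.h>
	#include <unicode/unistr.h>

	int main()
	{
		UErrorCode status = U_ZERO_ERROR;
		icu::Collator *coll = icu::Collator::createInstance(icu::Locale::getUS(), status);
		if (U_FAILURE(status)) {
			return 1;
		}
		coll->setStrength(icu::Collator::PRIMARY);

		// At primary strength, "Héllo" and "hello" compare equal; indexing trigrams
		// over primary sort keys (as the proof of concept below does) aims to make
		// such matches possible.
		UCollationResult res = coll->compare(icu::UnicodeString::fromUTF8("Héllo"),
		                                     icu::UnicodeString::fromUTF8("hello"), status);
		printf("%s\n", res == UCOL_EQUAL ? "equal" : "different");

		delete coll;
		return 0;
	}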
[plocate] / database-builder.cpp
#include "database-builder.h"

#include "dprintf.h"
#include "turbopfor-encode.h"

#include <algorithm>
#include <assert.h>
#ifdef HAS_ENDIAN_H
#include <endian.h>
#endif
#include <fcntl.h>
#include <math.h>
#include <memory>
#include <string.h>
#include <string_view>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <zdict.h>
#include <zstd.h>
#include <unicode/coll.h>
#include <unicode/locid.h>

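// Rough upper bound (in bytes) on the space needed to encode n 32-bit values with
// TurboPFor: about one header byte per 128-value block plus the values themselves
// stored uncompressed, with some slack.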
#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))

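// 2^24; one possible slot for every 24-bit (three-byte) trigram.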
#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

constexpr unsigned num_overflow_slots = 16;

string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf);
class PostingListBuilder {
public:
	inline void add_docid(uint32_t docid);
	inline void add_first_docid(uint32_t docid);
	void finish();

	vector<unsigned char> encoded;
	size_t get_num_docids() const
	{
		// Updated only when we flush, so check that we're finished.
		assert(pending_deltas.empty());
		return num_docids;
	}

private:
	void write_header(uint32_t docid);
	void append_block();

	vector<uint32_t> pending_deltas;

	uint32_t num_docids = 0;  // Should be size_t, except the format only supports 2^32 docids per posting list anyway.
	uint32_t last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
	// Deduplicate against the last inserted value, if any.
	if (docid == last_docid) {
		return;
	}

	pending_deltas.push_back(docid - last_docid - 1);
	last_docid = docid;
	if (pending_deltas.size() == 128) {
		append_block();
		pending_deltas.clear();
		num_docids += 128;
	}
}

void PostingListBuilder::add_first_docid(uint32_t docid)
{
	write_header(docid);
	++num_docids;
	last_docid = docid;
}

void PostingListBuilder::finish()
{
	if (pending_deltas.empty()) {
		return;
	}

	assert(!encoded.empty());  // write_header() should already have run.

	// No interleaving for partial blocks.
	unsigned char buf[P4NENC_BOUND(128)];
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
	encoded.insert(encoded.end(), buf, end);

	num_docids += pending_deltas.size();
	pending_deltas.clear();
}

void PostingListBuilder::append_block()
{
	unsigned char buf[P4NENC_BOUND(128)];
	assert(pending_deltas.size() == 128);
	unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
	encoded.insert(encoded.end(), buf, end);
}

void PostingListBuilder::write_header(uint32_t docid)
{
	unsigned char buf[P4NENC_BOUND(1)];
	unsigned char *end = write_baseval(docid, buf);
	encoded.insert(encoded.end(), buf, end);
}

void DictionaryBuilder::add_file(string filename, dir_time)
{
	if (keep_current_block) {  // Only bother saving the filenames if we're actually keeping the block.
		if (!current_block.empty()) {
			current_block.push_back('\0');
		}
		current_block += filename;
	}
	if (++num_files_in_block == block_size) {
		flush_block();
	}
}

void DictionaryBuilder::flush_block()
{
	if (keep_current_block) {
		if (slot_for_current_block == -1) {
			lengths.push_back(current_block.size());
			sampled_blocks.push_back(move(current_block));
		} else {
			lengths[slot_for_current_block] = current_block.size();
			sampled_blocks[slot_for_current_block] = move(current_block);
		}
	}
	current_block.clear();
	num_files_in_block = 0;
	++block_num;

	if (block_num < blocks_to_keep) {
		keep_current_block = true;
		slot_for_current_block = -1;
	} else {
		// Keep every block with equal probability (reservoir sampling).
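		// idx is uniform in [0, block_num]; the current block survives iff idx < blocks_to_keep,
		// i.e., with probability blocks_to_keep / (block_num + 1), and then replaces whichever
		// sampled block previously occupied that slot.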
		uint64_t idx = uniform_int_distribution<uint64_t>(0, block_num)(reservoir_rand);
		keep_current_block = (idx < blocks_to_keep);
		slot_for_current_block = idx;
	}
}

string DictionaryBuilder::train(size_t buf_size)
{
	string dictionary_buf;
	sort(sampled_blocks.begin(), sampled_blocks.end());  // Seemingly important for decompression speed.
	for (const string &block : sampled_blocks) {
		dictionary_buf += block;
	}

	string buf;
	buf.resize(buf_size);
	size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
	if (ZDICT_isError(ret)) {
		return "";
	}
	dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);
	buf.resize(ret);

	sampled_blocks.clear();
	lengths.clear();

	return buf;
}

class EncodingCorpus : public DatabaseReceiver {
public:
	EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times);
	~EncodingCorpus();

	void add_file(std::string filename, dir_time dt) override;
	void flush_block() override;
	void finish() override;

	std::vector<uint64_t> filename_blocks;
	size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
	bool seen_trigram(uint32_t trgm)
	{
		return invindex[trgm] != nullptr;
	}
	size_t num_files_seen() const override { return num_files; }
	PostingListBuilder &get_pl_builder(uint32_t trgm)
	{
		return *invindex[trgm];
	}

	void add_docid(uint32_t trgm, uint32_t docid)
	{
		if (invindex[trgm] == nullptr) {
			invindex[trgm] = new PostingListBuilder;
			invindex[trgm]->add_first_docid(docid);
		} else {
			invindex[trgm]->add_docid(docid);
		}
	}

	size_t num_trigrams() const;
	std::string get_compressed_dir_times();

private:
	void compress_dir_times(size_t allowed_slop);

	std::unique_ptr<PostingListBuilder *[]> invindex;
	FILE *outfp;
	off_t outfp_pos;  // Cheaper than calling ftell(outfp) all the time.
	std::string current_block;
	std::string tempbuf;
	const size_t block_size;
	const bool store_dir_times;
	ZSTD_CDict *cdict;

	ZSTD_CStream *dir_time_ctx = nullptr;
	std::string dir_times;  // Buffer of still-uncompressed data.
	std::string dir_times_compressed;
};

EncodingCorpus::EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times)
	: invindex(new PostingListBuilder *[NUM_TRIGRAMS]), outfp(outfp), outfp_pos(ftell(outfp)), block_size(block_size), store_dir_times(store_dir_times), cdict(cdict)
{
	fill(invindex.get(), invindex.get() + NUM_TRIGRAMS, nullptr);
	if (store_dir_times) {
		dir_time_ctx = ZSTD_createCStream();
		ZSTD_initCStream(dir_time_ctx, /*level=*/6);
	}
}

EncodingCorpus::~EncodingCorpus()
{
	for (unsigned i = 0; i < NUM_TRIGRAMS; ++i) {
		delete invindex[i];
	}
}

void EncodingCorpus::add_file(string filename, dir_time dt)
{
	++num_files;
	if (!current_block.empty()) {
		current_block.push_back('\0');
	}
	current_block += filename;
	if (++num_files_in_block == block_size) {
		flush_block();
	}

	if (store_dir_times) {
		if (dt.sec == -1) {
			// Not a directory.
			dir_times.push_back('\0');
		} else {
			dir_times.push_back('\1');
			dir_times.append(reinterpret_cast<char *>(&dt.sec), sizeof(dt.sec));
			dir_times.append(reinterpret_cast<char *>(&dt.nsec), sizeof(dt.nsec));
		}
		compress_dir_times(/*allowed_slop=*/4096);
	}
}

void EncodingCorpus::compress_dir_times(size_t allowed_slop)
{
	while (dir_times.size() >= allowed_slop) {
		size_t old_size = dir_times_compressed.size();
		dir_times_compressed.resize(old_size + 4096);

		ZSTD_outBuffer outbuf;
		outbuf.dst = dir_times_compressed.data() + old_size;
		outbuf.size = 4096;
		outbuf.pos = 0;

		ZSTD_inBuffer inbuf;
		inbuf.src = dir_times.data();
		inbuf.size = dir_times.size();
		inbuf.pos = 0;

		int ret = ZSTD_compressStream(dir_time_ctx, &outbuf, &inbuf);
		if (ret < 0) {
			fprintf(stderr, "ZSTD_compressStream() failed\n");
			exit(1);
		}

		dir_times_compressed.resize(old_size + outbuf.pos);
		dir_times.erase(dir_times.begin(), dir_times.begin() + inbuf.pos);

		if (outbuf.pos == 0 && inbuf.pos == 0) {
			// Nothing happened (not enough data?), try again later.
			return;
		}
	}
}

void EncodingCorpus::flush_block()
{
	if (current_block.empty()) {
		return;
	}

	uint32_t docid = num_blocks;

	// Oh, ICU...
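	// For every available collation locale, compute a primary-strength ("strength zero")
	// sort key for each filename and index trigrams over the sort-key bytes instead of
	// the raw UTF-8 bytes. Primary strength ignores case and accents, so a search can
	// match a filename regardless of those, under any locale's collation rules. Doing
	// this per locale and per block is expensive; this is only a proof of concept.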
	vector<uint8_t> sort_key;
	sort_key.resize(32);
	int32_t num_locales;
	const icu::Locale* locales = icu::Collator::getAvailableLocales(num_locales);
	for (int i = 0; i < num_locales; ++i) {
		const icu::Locale &loc = locales[i];
		if (strcmp(loc.getName(), "en_US_POSIX") == 0) {
			continue;  // Too weird.
		}
		UErrorCode status = U_ZERO_ERROR;
		icu::Collator *coll = icu::Collator::createInstance(loc, status);
		if (U_FAILURE(status)) {
			fprintf(stderr, "ERROR: Failed to create collator\n");
			exit(1);
		}
		coll->setStrength(icu::Collator::PRIMARY);
		const char *ptr = current_block.c_str();
		const char *end = ptr + current_block.size();
		while (ptr < end) {
			size_t len = strlen(ptr);
			int32_t sortkey_len;
			for (;;) {
				sortkey_len = coll->getSortKey(icu::UnicodeString::fromUTF8(icu::StringPiece(ptr, len)), sort_key.data(), sort_key.size());
				if (sortkey_len < sort_key.size()) {  // Note <, not <=; we need to keep a slop byte.
					break;
				}
				sort_key.resize(sortkey_len * 3 / 2);
			}

			const uint8_t *keyptr = &sort_key[0];
			const uint8_t *keyend = keyptr + sortkey_len;
			while (keyptr < keyend - 3) {
				// NOTE: Will read one byte past the end of the trigram, but it's OK,
				// since we always call it from contexts where there's a terminating zero byte.
				uint32_t trgm;
				memcpy(&trgm, keyptr, sizeof(trgm));
				++keyptr;
				trgm = le32toh(trgm);
				add_docid(trgm & 0xffffff, docid);
			}

			ptr += len + 1;
		}
		delete coll;  // createInstance() gives us ownership; don't leak one collator per locale per block.
	}
#if 0
	// Create trigrams.
	const char *ptr = current_block.c_str();
	const char *end = ptr + current_block.size();
	while (ptr < end - 3) {  // Must be at least one filename left, that's at least three bytes.
		if (ptr[0] == '\0') {
			// This filename is zero bytes, so skip it (and the zero terminator).
			++ptr;
			continue;
		} else if (ptr[1] == '\0') {
			// This filename is one byte, so skip it (and the zero terminator).
			ptr += 2;
			continue;
		} else if (ptr[2] == '\0') {
			// This filename is two bytes, so skip it (and the zero terminator).
			ptr += 3;
			continue;
		}
		for (;;) {
			// NOTE: Will read one byte past the end of the trigram, but it's OK,
			// since we always call it from contexts where there's a terminating zero byte.
			uint32_t trgm;
			memcpy(&trgm, ptr, sizeof(trgm));
			++ptr;
			trgm = le32toh(trgm);
			add_docid(trgm & 0xffffff, docid);
			if (trgm <= 0xffffff) {
				// Terminating zero byte, so we're done with this filename.
				// Skip the remaining two bytes, and the zero terminator.
				ptr += 3;
				break;
			}
		}
	}
#endif

	// Compress and add the filename block.
	filename_blocks.push_back(outfp_pos);
	string compressed = zstd_compress(current_block, cdict, &tempbuf);
	if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
		perror("fwrite()");
		exit(1);
	}
	outfp_pos += compressed.size();

	current_block.clear();
	num_files_in_block = 0;
	++num_blocks;
}

void EncodingCorpus::finish()
{
	flush_block();
}

size_t EncodingCorpus::num_trigrams() const
{
	size_t num = 0;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (invindex[trgm] != nullptr) {
			++num;
		}
	}
	return num;
}

string EncodingCorpus::get_compressed_dir_times()
{
	if (!store_dir_times) {
		return "";
	}
	compress_dir_times(/*allowed_slop=*/0);
	assert(dir_times.empty());

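	// Flush anything the encoder still has buffered. ZSTD_endStream() returns the number
	// of bytes still left to flush, so a return value of 0 means the frame is complete.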
	for (;;) {
		size_t old_size = dir_times_compressed.size();
		dir_times_compressed.resize(old_size + 4096);

		ZSTD_outBuffer outbuf;
		outbuf.dst = dir_times_compressed.data() + old_size;
		outbuf.size = 4096;
		outbuf.pos = 0;

		int ret = ZSTD_endStream(dir_time_ctx, &outbuf);
		if (ret < 0) {
			fprintf(stderr, "ZSTD_endStream() failed\n");
			exit(1);
		}

		dir_times_compressed.resize(old_size + outbuf.pos);

		if (ret == 0) {
			// All done.
			break;
		}
	}

	return dir_times_compressed;
}

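// Compresses src into a single zstd frame, reusing *tempbuf as scratch space between
// calls to avoid repeated allocations. Uses the dictionary if one is given. Not
// thread-safe, since the ZSTD_CCtx is a function-level static.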
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
{
	static ZSTD_CCtx *ctx = nullptr;
	if (ctx == nullptr) {
		ctx = ZSTD_createCCtx();
	}

	size_t max_size = ZSTD_compressBound(src.size());
	if (tempbuf->size() < max_size) {
		tempbuf->resize(max_size);
	}
	size_t size;
	if (cdict == nullptr) {
		size = ZSTD_compressCCtx(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
	} else {
		size = ZSTD_compress_usingCDict(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), cdict);
	}
	return string(tempbuf->data(), size);
}

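// Simple trial division; only used when sizing the trigram hash table, so speed
// is not critical.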
bool is_prime(uint32_t x)
{
	if ((x % 2) == 0 || (x % 3) == 0) {
		return false;
	}
	uint32_t limit = ceil(sqrt(x));
	for (uint32_t factor = 5; factor <= limit; ++factor) {
		if ((x % factor) == 0) {
			return false;
		}
	}
	return true;
}

uint32_t next_prime(uint32_t x)
{
	if ((x % 2) == 0) {
		++x;
	}
	while (!is_prime(x)) {
		x += 2;
	}
	return x;
}

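// Builds the on-disk trigram hash table using linear probing with Robin Hood insertion:
// if the element being placed is further from its home bucket than the incumbent, the
// two swap, which keeps the longest probe distance short. Returns nullptr if an element
// would need more than num_overflow_slots probes, signalling the caller to grow the
// table and try again.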
unique_ptr<Trigram[]> create_hashtable(EncodingCorpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
	unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		ht[i].trgm = uint32_t(-1);
		ht[i].num_docids = 0;
		ht[i].offset = 0;
	}
	for (uint32_t trgm : all_trigrams) {
		// We don't know offset yet, so set it to zero.
		Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).get_num_docids()), 0 };

		uint32_t bucket = hash_trigram(trgm, ht_size);
		unsigned distance = 0;
		while (ht[bucket].num_docids != 0) {
			// Robin Hood hashing; reduces the longest distance by a lot.
			unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
			if (distance > other_distance) {
				swap(to_insert, ht[bucket]);
				distance = other_distance;
			}

			++bucket, ++distance;
			if (distance > num_overflow_slots) {
				return nullptr;
			}
		}
		ht[bucket] = to_insert;
	}
	return ht;
}

DatabaseBuilder::DatabaseBuilder(const char *outfile, gid_t owner, int block_size, string dictionary, bool check_visibility)
	: outfile(outfile), block_size(block_size)
{
	umask(0027);

	string path = outfile;
	path.resize(path.find_last_of('/') + 1);
	if (path.empty()) {
		path = ".";
	}
	int fd = -1;
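	// Prefer an anonymous temporary file (O_TMPFILE) that is linked into place with
	// linkat() once the database is complete; fall back to mkstemp() + rename() on
	// kernels or filesystems without O_TMPFILE support.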
#ifdef O_TMPFILE
	fd = open(path.c_str(), O_WRONLY | O_TMPFILE, 0640);
	if (fd == -1 && errno != EOPNOTSUPP && errno != EISDIR) {
		perror(path.c_str());
		exit(1);
	}
#endif
	if (fd == -1) {
		temp_filename = string(outfile) + ".XXXXXX";
		fd = mkstemp(&temp_filename[0]);
		if (fd == -1) {
			perror(temp_filename.c_str());
			exit(1);
		}
		if (fchmod(fd, 0640) == -1) {
			perror("fchmod");
			exit(1);
		}
	}

	if (owner != (gid_t)-1) {
		if (fchown(fd, (uid_t)-1, owner) == -1) {
			perror("fchown");
			exit(1);
		}
	}

	outfp = fdopen(fd, "wb");
	if (outfp == nullptr) {
		perror(outfile);
		exit(1);
	}

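	// On-disk layout, in write order: the fixed-size header, the zstd dictionary (if any),
	// the compressed filename blocks, the filename index, the trigram hash table, the
	// posting lists, the compressed directory times, the recommended dictionary for the
	// next update, and finally the configuration block. The header is rewritten at the
	// end, once all offsets are known.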
	// Write the header.
	memcpy(hdr.magic, "\0plocate", 8);
	hdr.version = -1;  // Mark as broken.
	hdr.hashtable_size = 0;  // Not known yet.
	hdr.extra_ht_slots = num_overflow_slots;
	hdr.num_docids = 0;
	hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
	hdr.max_version = 2;
	hdr.filename_index_offset_bytes = -1;
	hdr.zstd_dictionary_length_bytes = -1;
	hdr.check_visibility = check_visibility;
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	if (dictionary.empty()) {
		hdr.zstd_dictionary_offset_bytes = 0;
		hdr.zstd_dictionary_length_bytes = 0;
	} else {
		hdr.zstd_dictionary_offset_bytes = ftell(outfp);
		fwrite(dictionary.data(), dictionary.size(), 1, outfp);
		hdr.zstd_dictionary_length_bytes = dictionary.size();
		cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);
	}

	hdr.directory_data_length_bytes = 0;
	hdr.directory_data_offset_bytes = 0;
	hdr.next_zstd_dictionary_length_bytes = 0;
	hdr.next_zstd_dictionary_offset_bytes = 0;
	hdr.conf_block_length_bytes = 0;
	hdr.conf_block_offset_bytes = 0;
}

DatabaseReceiver *DatabaseBuilder::start_corpus(bool store_dir_times)
{
	corpus_start = steady_clock::now();
	corpus = new EncodingCorpus(outfp, block_size, cdict, store_dir_times);
	return corpus;
}

void DatabaseBuilder::set_next_dictionary(std::string next_dictionary)
{
	this->next_dictionary = move(next_dictionary);
}

void DatabaseBuilder::set_conf_block(std::string conf_block)
{
	this->conf_block = move(conf_block);
}

void DatabaseBuilder::finish_corpus()
{
	corpus->finish();
	hdr.num_docids = corpus->filename_blocks.size();

	// Stick an empty block at the end as sentinel.
	corpus->filename_blocks.push_back(ftell(outfp));
	const size_t bytes_for_filenames = corpus->filename_blocks.back() - corpus->filename_blocks.front();

	// Write the offsets to the filenames.
	hdr.filename_index_offset_bytes = ftell(outfp);
	const size_t bytes_for_filename_index = corpus->filename_blocks.size() * sizeof(uint64_t);
	fwrite(corpus->filename_blocks.data(), corpus->filename_blocks.size(), sizeof(uint64_t), outfp);
	corpus->filename_blocks.clear();
	corpus->filename_blocks.shrink_to_fit();

	// Finish up encoding the posting lists.
	size_t trigrams = 0, longest_posting_list = 0;
	size_t bytes_for_posting_lists = 0;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (!corpus->seen_trigram(trgm))
			continue;
		PostingListBuilder &pl_builder = corpus->get_pl_builder(trgm);
		pl_builder.finish();
		longest_posting_list = max(longest_posting_list, pl_builder.get_num_docids());
		trigrams += pl_builder.get_num_docids();
		bytes_for_posting_lists += pl_builder.encoded.size();
	}
	size_t num_trigrams = corpus->num_trigrams();
	dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
		corpus->num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
	dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

	dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - corpus_start).count());

	// Find the used trigrams.
	vector<uint32_t> all_trigrams;
	for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
		if (corpus->seen_trigram(trgm)) {
			all_trigrams.push_back(trgm);
		}
	}

	// Create the hash table.
	unique_ptr<Trigram[]> hashtable;
	uint32_t ht_size = next_prime(all_trigrams.size());
	for (;;) {
		hashtable = create_hashtable(*corpus, all_trigrams, ht_size, num_overflow_slots);
		if (hashtable == nullptr) {
			dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
			ht_size = next_prime(ht_size * 1.05);
		} else {
			dprintf("Created hash table of size %u.\n\n", ht_size);
			break;
		}
	}

	// Find the offsets for each posting list.
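	// The posting lists are written immediately after the hash table, so their absolute
	// file offsets can be computed now, before anything is actually written.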
	size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
	uint64_t offset = ftell(outfp) + bytes_for_hashtable;
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		hashtable[i].offset = offset;  // Needs to be there even for empty slots.
		if (hashtable[i].num_docids == 0) {
			continue;
		}

		const vector<unsigned char> &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
		offset += encoded.size();
	}

	// Write the hash table.
	hdr.hash_table_offset_bytes = ftell(outfp);
	hdr.hashtable_size = ht_size;
	fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

	// Write the actual posting lists.
	for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
		if (hashtable[i].num_docids == 0) {
			continue;
		}
		const vector<unsigned char> &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
		fwrite(encoded.data(), encoded.size(), 1, outfp);
	}

	// Finally, write the directory times (for updatedb).
	string compressed_dir_times = corpus->get_compressed_dir_times();
	size_t bytes_for_compressed_dir_times = 0;
	if (!compressed_dir_times.empty()) {
		hdr.directory_data_offset_bytes = ftell(outfp);
		hdr.directory_data_length_bytes = compressed_dir_times.size();
		fwrite(compressed_dir_times.data(), compressed_dir_times.size(), 1, outfp);
		bytes_for_compressed_dir_times = compressed_dir_times.size();
		compressed_dir_times.clear();
	}

	// Write the recommended dictionary for next update.
	if (!next_dictionary.empty()) {
		hdr.next_zstd_dictionary_offset_bytes = ftell(outfp);
		hdr.next_zstd_dictionary_length_bytes = next_dictionary.size();
		fwrite(next_dictionary.data(), next_dictionary.size(), 1, outfp);
	}

	// And the configuration block.
	if (!conf_block.empty()) {
		hdr.conf_block_offset_bytes = ftell(outfp);
		hdr.conf_block_length_bytes = conf_block.size();
		fwrite(conf_block.data(), conf_block.size(), 1, outfp);
	}

	// Rewind, and write the updated header.
	hdr.version = 1;
	fseek(outfp, 0, SEEK_SET);
	fwrite(&hdr, sizeof(hdr), 1, outfp);

	if (!temp_filename.empty()) {
		if (rename(temp_filename.c_str(), outfile.c_str()) == -1) {
			perror("rename");
			exit(1);
		}
	} else {
#ifdef O_TMPFILE
		// Give the file a proper name, making it visible in the file system.
		// TODO: It would be nice to be able to do this atomically, like with rename.
		unlink(outfile.c_str());
		char procpath[256];
		snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fileno(outfp));
		if (linkat(AT_FDCWD, procpath, AT_FDCWD, outfile.c_str(), AT_SYMLINK_FOLLOW) == -1) {
			perror("linkat");
			exit(1);
		}
#endif
	}

	fclose(outfp);

	size_t total_bytes = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames + bytes_for_compressed_dir_times);

	dprintf("Block size:     %7d files\n", block_size);
	dprintf("Dictionary:     %'7.1f MB\n", hdr.zstd_dictionary_length_bytes / 1048576.0);
	dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
	dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
	dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
	dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
	if (bytes_for_compressed_dir_times != 0) {
		dprintf("Modify times:   %'7.1f MB\n", bytes_for_compressed_dir_times / 1048576.0);
	}
	dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
	dprintf("\n");
}