// plocate 1.1.22: database-builder.cpp
#include "database-builder.h"

#include "dprintf.h"
#include "turbopfor-encode.h"

#include <algorithm>
#include <assert.h>
#ifdef HAS_ENDIAN_H
#include <endian.h>
#endif
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <string_view>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <zdict.h>
#include <zstd.h>

#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))

#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

constexpr unsigned num_overflow_slots = 16;

string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf);

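// Builds the encoded posting list for a single trigram. Docids arrive in
// increasing order, are deduplicated and delta-encoded, and are buffered in
// groups of 128 that get compressed with TurboPFor (see turbopfor-encode.h);
// finish() flushes any remaining partial group without interleaving.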
class PostingListBuilder {
public:
        inline void add_docid(uint32_t docid);
        inline void add_first_docid(uint32_t docid);
        void finish();

        vector<unsigned char> encoded;
        size_t get_num_docids() const {
                // Updated only when we flush, so check that we're finished.
                assert(pending_deltas.empty());
                return num_docids;
        }

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_deltas;

        uint32_t num_docids = 0;  // Should be size_t, except the format only supports 2^32 docids per posting list anyway.
        uint32_t last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (docid == last_docid) {
                return;
        }

        pending_deltas.push_back(docid - last_docid - 1);
        last_docid = docid;
        if (pending_deltas.size() == 128) {
                append_block();
                pending_deltas.clear();
                num_docids += 128;
        }
}

void PostingListBuilder::add_first_docid(uint32_t docid)
{
        write_header(docid);
        ++num_docids;
        last_docid = docid;
}

void PostingListBuilder::finish()
{
        if (pending_deltas.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
        encoded.insert(encoded.end(), buf, end);

        num_docids += pending_deltas.size();
        pending_deltas.clear();
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_deltas.size() == 128);
        unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
        encoded.insert(encoded.end(), buf, end);
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        unsigned char *end = write_baseval(docid, buf);
        encoded.insert(encoded.end(), buf, end);
}

void DictionaryBuilder::add_file(string filename, dir_time)
{
        if (keep_current_block) {  // Only bother saving the filenames if we're actually keeping the block.
                if (!current_block.empty()) {
                        current_block.push_back('\0');
                }
                current_block += filename;
        }
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

void DictionaryBuilder::flush_block()
{
        if (keep_current_block) {
                if (slot_for_current_block == -1) {
                        lengths.push_back(current_block.size());
                        sampled_blocks.push_back(move(current_block));
                } else {
                        lengths[slot_for_current_block] = current_block.size();
                        sampled_blocks[slot_for_current_block] = move(current_block);
                }
        }
        current_block.clear();
        num_files_in_block = 0;
        ++block_num;

        if (block_num < blocks_to_keep) {
                keep_current_block = true;
                slot_for_current_block = -1;
        } else {
                // Keep every block with equal probability (reservoir sampling).
                uint64_t idx = uniform_int_distribution<uint64_t>(0, block_num)(reservoir_rand);
                keep_current_block = (idx < blocks_to_keep);
                slot_for_current_block = idx;
        }
}

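// Train a zstd dictionary of at most buf_size bytes from the sampled blocks
// using ZDICT_trainFromBuffer(). Returns the empty string if training fails.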
string DictionaryBuilder::train(size_t buf_size)
{
        string dictionary_buf;
        sort(sampled_blocks.begin(), sampled_blocks.end());  // Seemingly important for decompression speed.
        for (const string &block : sampled_blocks) {
                dictionary_buf += block;
        }

        string buf;
        buf.resize(buf_size);
        size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
        if (ZDICT_isError(ret)) {
                return "";
        }
        dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);
        buf.resize(ret);

        sampled_blocks.clear();
        lengths.clear();

        return buf;
}

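// The main indexing sink: receives filenames, concatenates them into
// zero-separated blocks of block_size entries, zstd-compresses each block
// straight to outfp, and keeps an in-memory inverted index with one
// PostingListBuilder per trigram. Optionally also collects and compresses
// directory modification times (used by updatedb).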
class EncodingCorpus : public DatabaseReceiver {
public:
        EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times);
        ~EncodingCorpus();

        void add_file(std::string filename, dir_time dt) override;
        void flush_block() override;
        void finish() override;

        std::vector<uint64_t> filename_blocks;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
        bool seen_trigram(uint32_t trgm)
        {
                return invindex[trgm] != nullptr;
        }
        size_t num_files_seen() const override { return num_files; }
        PostingListBuilder &get_pl_builder(uint32_t trgm)
        {
                return *invindex[trgm];
        }

        void add_docid(uint32_t trgm, uint32_t docid)
        {
                if (invindex[trgm] == nullptr) {
                        invindex[trgm] = new PostingListBuilder;
                        invindex[trgm]->add_first_docid(docid);
                } else {
                        invindex[trgm]->add_docid(docid);
                }
        }

        size_t num_trigrams() const;
        std::string get_compressed_dir_times();

private:
        void compress_dir_times(size_t allowed_slop);

        std::unique_ptr<PostingListBuilder *[]> invindex;
        FILE *outfp;
        off_t outfp_pos;  // Cheaper than calling ftell(outfp) all the time.
        std::string current_block;
        std::string tempbuf;
        const size_t block_size;
        const bool store_dir_times;
        ZSTD_CDict *cdict;

        ZSTD_CStream *dir_time_ctx = nullptr;
        std::string dir_times;  // Buffer of still-uncompressed data.
        std::string dir_times_compressed;
};


EncodingCorpus::EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times)
        : invindex(new PostingListBuilder *[NUM_TRIGRAMS]), outfp(outfp), outfp_pos(ftell(outfp)), block_size(block_size), store_dir_times(store_dir_times), cdict(cdict)
{
        fill(invindex.get(), invindex.get() + NUM_TRIGRAMS, nullptr);
        if (store_dir_times) {
                dir_time_ctx = ZSTD_createCStream();
                ZSTD_initCStream(dir_time_ctx, /*level=*/6);
        }
}

EncodingCorpus::~EncodingCorpus()
{
        for (unsigned i = 0; i < NUM_TRIGRAMS; ++i) {
                delete invindex[i];
        }
}

void EncodingCorpus::add_file(string filename, dir_time dt)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }

        if (store_dir_times) {
                if (dt.sec == -1) {
                        // Not a directory.
                        dir_times.push_back('\0');
                } else {
                        dir_times.push_back('\1');
                        dir_times.append(reinterpret_cast<char *>(&dt.sec), sizeof(dt.sec));
                        dir_times.append(reinterpret_cast<char *>(&dt.nsec), sizeof(dt.nsec));
                }
                compress_dir_times(/*allowed_slop=*/4096);
        }
}

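// Incrementally compress the buffered directory times, leaving at most
// allowed_slop uncompressed bytes behind; compressed output is appended to
// dir_times_compressed in 4 kB chunks.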
void EncodingCorpus::compress_dir_times(size_t allowed_slop)
{
        while (dir_times.size() >= allowed_slop) {
                size_t old_size = dir_times_compressed.size();
                dir_times_compressed.resize(old_size + 4096);

                ZSTD_outBuffer outbuf;
                outbuf.dst = dir_times_compressed.data() + old_size;
                outbuf.size = 4096;
                outbuf.pos = 0;

                ZSTD_inBuffer inbuf;
                inbuf.src = dir_times.data();
                inbuf.size = dir_times.size();
                inbuf.pos = 0;

                size_t ret = ZSTD_compressStream(dir_time_ctx, &outbuf, &inbuf);
                if (ZSTD_isError(ret)) {
                        fprintf(stderr, "ZSTD_compressStream() failed\n");
                        exit(1);
                }

                dir_times_compressed.resize(old_size + outbuf.pos);
                dir_times.erase(dir_times.begin(), dir_times.begin() + inbuf.pos);

                if (outbuf.pos == 0 && inbuf.pos == 0) {
                        // Nothing happened (not enough data?), try again later.
                        return;
                }
        }
}

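// Finish the current filename block: assign it the next docid, add that docid
// to the posting list of every trigram occurring in the block's filenames,
// then zstd-compress the block and append it to the output file, remembering
// its offset in filename_blocks.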
void EncodingCorpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        const char *end = ptr + current_block.size();
        while (ptr < end - 3) {  // Must be at least one filename left, that's at least three bytes.
                if (ptr[0] == '\0') {
                        // This filename is zero bytes, so skip it (and the zero terminator).
                        ++ptr;
                        continue;
                } else if (ptr[1] == '\0') {
                        // This filename is one byte, so skip it (and the zero terminator).
                        ptr += 2;
                        continue;
                } else if (ptr[2] == '\0') {
                        // This filename is two bytes, so skip it (and the zero terminator).
                        ptr += 3;
                        continue;
                }
                for ( ;; ) {
                        // NOTE: Will read one byte past the end of the trigram, but it's OK,
                        // since we always call it from contexts where there's a terminating zero byte.
                        uint32_t trgm;
                        memcpy(&trgm, ptr, sizeof(trgm));
                        ++ptr;
                        trgm = le32toh(trgm);
                        add_docid(trgm & 0xffffff, docid);
                        if (trgm <= 0xffffff) {
                                // Terminating zero byte, so we're done with this filename.
                                // Skip the remaining two bytes, and the zero terminator.
                                ptr += 3;
                                break;
                        }
                }
        }

        // Compress and add the filename block.
        filename_blocks.push_back(outfp_pos);
        string compressed = zstd_compress(current_block, cdict, &tempbuf);
        if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
                perror("fwrite()");
                exit(1);
        }
        outfp_pos += compressed.size();

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

void EncodingCorpus::finish()
{
        flush_block();
}

size_t EncodingCorpus::num_trigrams() const
{
        size_t num = 0;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (invindex[trgm] != nullptr) {
                        ++num;
                }
        }
        return num;
}

string EncodingCorpus::get_compressed_dir_times()
{
        if (!store_dir_times) {
                return "";
        }
        compress_dir_times(/*allowed_slop=*/0);
        assert(dir_times.empty());

        for (;;) {
                size_t old_size = dir_times_compressed.size();
                dir_times_compressed.resize(old_size + 4096);

                ZSTD_outBuffer outbuf;
                outbuf.dst = dir_times_compressed.data() + old_size;
                outbuf.size = 4096;
                outbuf.pos = 0;

                size_t ret = ZSTD_endStream(dir_time_ctx, &outbuf);
                if (ZSTD_isError(ret)) {
                        fprintf(stderr, "ZSTD_endStream() failed\n");
                        exit(1);
                }

                dir_times_compressed.resize(old_size + outbuf.pos);

                if (ret == 0) {
                        // All done.
                        break;
                }
        }

        return dir_times_compressed;
}

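// Compress src with a lazily created, reused ZSTD_CCtx, optionally using the
// given dictionary. *tempbuf is scratch space kept across calls so the output
// buffer does not have to be reallocated every time.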
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
{
        static ZSTD_CCtx *ctx = nullptr;
        if (ctx == nullptr) {
                ctx = ZSTD_createCCtx();
        }

        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size;
        if (cdict == nullptr) {
                size = ZSTD_compressCCtx(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        } else {
                size = ZSTD_compress_usingCDict(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), cdict);
        }
        return string(tempbuf->data(), size);
}

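// Trial-division primality test, used only for sizing the trigram hash table
// below. (It reports 2 and 3 as composite, which is harmless for that use.)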
bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}

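// Build the on-disk trigram hash table: open addressing with linear probing
// and Robin Hood insertion, allowing at most num_overflow_slots slots past the
// home bucket. Returns nullptr if some element would have to be displaced
// further than that, in which case the caller retries with a larger table.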
unique_ptr<Trigram[]> create_hashtable(EncodingCorpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).get_num_docids()), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}

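// Open the output database and write a provisional header (version -1, marked
// broken). The file is created invisibly with O_TMPFILE where supported,
// otherwise as a mkstemp() temporary next to the target; the zstd dictionary,
// if any, is written immediately after the header. finish_corpus() later fills
// in the final header and gives the file its real name.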
DatabaseBuilder::DatabaseBuilder(const char *outfile, gid_t owner, int block_size, string dictionary, bool check_visibility)
        : outfile(outfile), block_size(block_size)
{
        umask(0027);

        string path = outfile;
        path.resize(path.find_last_of('/') + 1);
        if (path.empty()) {
                path = ".";
        }
        int fd = -1;
#ifdef O_TMPFILE
        fd = open(path.c_str(), O_WRONLY | O_TMPFILE, 0640);
        if (fd == -1 && errno != EOPNOTSUPP) {
                perror(path.c_str());
                exit(1);
        }
#endif
        if (fd == -1) {
                temp_filename = string(outfile) + ".XXXXXX";
                fd = mkstemp(&temp_filename[0]);
                if (fd == -1) {
                        perror(temp_filename.c_str());
                        exit(1);
                }
                if (fchmod(fd, 0640) == -1) {
                        perror("fchmod");
                        exit(1);
                }
        }

        if (owner != (gid_t)-1) {
                if (fchown(fd, (uid_t)-1, owner) == -1) {
                        perror("fchown");
                        exit(1);
                }
        }

        outfp = fdopen(fd, "wb");
        if (outfp == nullptr) {
                perror(outfile);
                exit(1);
        }

        // Write the header.
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = -1;  // Mark as broken.
        hdr.hashtable_size = 0;  // Not known yet.
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.num_docids = 0;
        hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
        hdr.max_version = 2;
        hdr.filename_index_offset_bytes = -1;
        hdr.zstd_dictionary_length_bytes = -1;
        hdr.check_visibility = check_visibility;
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        if (dictionary.empty()) {
                hdr.zstd_dictionary_offset_bytes = 0;
                hdr.zstd_dictionary_length_bytes = 0;
        } else {
                hdr.zstd_dictionary_offset_bytes = ftell(outfp);
                fwrite(dictionary.data(), dictionary.size(), 1, outfp);
                hdr.zstd_dictionary_length_bytes = dictionary.size();
                cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);
        }

        hdr.directory_data_length_bytes = 0;
        hdr.directory_data_offset_bytes = 0;
        hdr.next_zstd_dictionary_length_bytes = 0;
        hdr.next_zstd_dictionary_offset_bytes = 0;
        hdr.conf_block_length_bytes = 0;
        hdr.conf_block_offset_bytes = 0;
}

DatabaseReceiver *DatabaseBuilder::start_corpus(bool store_dir_times)
{
        corpus_start = steady_clock::now();
        corpus = new EncodingCorpus(outfp, block_size, cdict, store_dir_times);
        return corpus;
}

void DatabaseBuilder::set_next_dictionary(std::string next_dictionary)
{
        this->next_dictionary = move(next_dictionary);
}

void DatabaseBuilder::set_conf_block(std::string conf_block)
{
        this->conf_block = move(conf_block);
}

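// Write everything that follows the filename blocks: the filename index, the
// trigram hash table and posting lists, the compressed directory times, the
// recommended dictionary for the next update and the configuration block.
// Finally, rewrite the header as version 1 and move the file into place
// (rename() for the mkstemp path, linkat() for the O_TMPFILE path).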
void DatabaseBuilder::finish_corpus()
{
        corpus->finish();
        hdr.num_docids = corpus->filename_blocks.size();

        // Stick an empty block at the end as sentinel.
        corpus->filename_blocks.push_back(ftell(outfp));
        const size_t bytes_for_filenames = corpus->filename_blocks.back() - corpus->filename_blocks.front();

        // Write the offsets to the filenames.
        hdr.filename_index_offset_bytes = ftell(outfp);
        const size_t bytes_for_filename_index = corpus->filename_blocks.size() * sizeof(uint64_t);
        fwrite(corpus->filename_blocks.data(), corpus->filename_blocks.size(), sizeof(uint64_t), outfp);
        corpus->filename_blocks.clear();
        corpus->filename_blocks.shrink_to_fit();

        // Finish up encoding the posting lists.
        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_for_posting_lists = 0;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (!corpus->seen_trigram(trgm))
                        continue;
                PostingListBuilder &pl_builder = corpus->get_pl_builder(trgm);
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.get_num_docids());
                trigrams += pl_builder.get_num_docids();
                bytes_for_posting_lists += pl_builder.encoded.size();
        }
        size_t num_trigrams = corpus->num_trigrams();
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus->num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - corpus_start).count());

        // Find the used trigrams.
        vector<uint32_t> all_trigrams;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (corpus->seen_trigram(trgm)) {
                        all_trigrams.push_back(trgm);
                }
        }

        // Create the hash table.
        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        for (;;) {
                hashtable = create_hashtable(*corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = ftell(outfp) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const vector<unsigned char> &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
                offset += encoded.size();
        }

        // Write the hash table.
        hdr.hash_table_offset_bytes = ftell(outfp);
        hdr.hashtable_size = ht_size;
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const vector<unsigned char> &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
        }

        // Finally, write the directory times (for updatedb).
        string compressed_dir_times = corpus->get_compressed_dir_times();
        size_t bytes_for_compressed_dir_times = 0;
        if (!compressed_dir_times.empty()) {
                hdr.directory_data_offset_bytes = ftell(outfp);
                hdr.directory_data_length_bytes = compressed_dir_times.size();
                fwrite(compressed_dir_times.data(), compressed_dir_times.size(), 1, outfp);
                bytes_for_compressed_dir_times = compressed_dir_times.size();
                compressed_dir_times.clear();
        }

        // Write the recommended dictionary for next update.
        if (!next_dictionary.empty()) {
                hdr.next_zstd_dictionary_offset_bytes = ftell(outfp);
                hdr.next_zstd_dictionary_length_bytes = next_dictionary.size();
                fwrite(next_dictionary.data(), next_dictionary.size(), 1, outfp);
        }

        // And the configuration block.
        if (!conf_block.empty()) {
                hdr.conf_block_offset_bytes = ftell(outfp);
                hdr.conf_block_length_bytes = conf_block.size();
                fwrite(conf_block.data(), conf_block.size(), 1, outfp);
        }

        // Rewind, and write the updated header.
        hdr.version = 1;
        fseek(outfp, 0, SEEK_SET);
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        if (!temp_filename.empty()) {
                if (rename(temp_filename.c_str(), outfile.c_str()) == -1) {
                        perror("rename");
                        exit(1);
                }
        } else {
#ifdef O_TMPFILE
                // Give the file a proper name, making it visible in the file system.
                // TODO: It would be nice to be able to do this atomically, like with rename.
                unlink(outfile.c_str());
                char procpath[256];
                snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fileno(outfp));
                if (linkat(AT_FDCWD, procpath, AT_FDCWD, outfile.c_str(), AT_SYMLINK_FOLLOW) == -1) {
                        perror("linkat");
                        exit(1);
                }
#endif
        }

        fclose(outfp);

        size_t total_bytes = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames + bytes_for_compressed_dir_times);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Dictionary:     %'7.1f MB\n", hdr.zstd_dictionary_length_bytes / 1048576.0);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        if (bytes_for_compressed_dir_times != 0) {
                dprintf("Modify times:   %'7.1f MB\n", bytes_for_compressed_dir_times / 1048576.0);
        }
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}