#include "database-builder.h"

#include "dprintf.h"
#include "turbopfor-encode.h"

#include <algorithm>
#include <assert.h>
#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <string_view>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <zdict.h>
#include <zstd.h>

#define P4NENC_BOUND(n) ((n + 127) / 128 + (n + 32) * sizeof(uint32_t))

#define NUM_TRIGRAMS 16777216

using namespace std;
using namespace std::chrono;

constexpr unsigned num_overflow_slots = 16;

string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf);

static inline uint32_t read_unigram(const string_view s, size_t idx)
{
        if (idx < s.size()) {
                return (unsigned char)s[idx];
        } else {
                return 0;
        }
}

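// Pack three consecutive bytes of the string, little-endian, into the low
// 24 bits of a uint32_t. Bytes past the end of the string read as zero.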
static inline uint32_t read_trigram(const string_view s, size_t start)
{
        return read_unigram(s, start) |
                (read_unigram(s, start + 1) << 8) |
                (read_unigram(s, start + 2) << 16);
}

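// Accumulates the docids for a single trigram. The first docid is written
// directly as a base value; every following docid is stored as a delta
// (minus one, since duplicates are filtered out) and flushed in TurboPFor
// blocks of 128 deltas. finish() encodes any leftover partial block,
// without interleaving.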
class PostingListBuilder {
public:
        inline void add_docid(uint32_t docid);
        void finish();

        string encoded;
        size_t num_docids = 0;

private:
        void write_header(uint32_t docid);
        void append_block();

        vector<uint32_t> pending_deltas;

        uint32_t last_docid = -1;
};

void PostingListBuilder::add_docid(uint32_t docid)
{
        // Deduplicate against the last inserted value, if any.
        if (docid == last_docid) {
                return;
        }

        if (num_docids == 0) {
                // Very first docid.
                write_header(docid);
                ++num_docids;
                last_docid = docid;
                return;
        }

        pending_deltas.push_back(docid - last_docid - 1);
        last_docid = docid;
        if (pending_deltas.size() == 128) {
                append_block();
                pending_deltas.clear();
        }
        ++num_docids;
}

void PostingListBuilder::finish()
{
        if (pending_deltas.empty()) {
                return;
        }

        assert(!encoded.empty());  // write_header() should already have run.

        // No interleaving for partial blocks.
        unsigned char buf[P4NENC_BOUND(128)];
        unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), pending_deltas.size(), /*interleaved=*/false, buf);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::append_block()
{
        unsigned char buf[P4NENC_BOUND(128)];
        assert(pending_deltas.size() == 128);
        unsigned char *end = encode_pfor_single_block<128>(pending_deltas.data(), 128, /*interleaved=*/true, buf);
        encoded.append(reinterpret_cast<char *>(buf), reinterpret_cast<char *>(end));
}

void PostingListBuilder::write_header(uint32_t docid)
{
        unsigned char buf[P4NENC_BOUND(1)];
        unsigned char *end = write_baseval(docid, buf);
        encoded.append(reinterpret_cast<char *>(buf), end - buf);
}

void DictionaryBuilder::add_file(string filename, dir_time)
{
        if (keep_current_block) {  // Only bother saving the filenames if we're actually keeping the block.
                if (!current_block.empty()) {
                        current_block.push_back('\0');
                }
                current_block += filename;
        }
        if (++num_files_in_block == block_size) {
                flush_block();
        }
}

void DictionaryBuilder::flush_block()
{
        if (keep_current_block) {
                if (slot_for_current_block == -1) {
                        lengths.push_back(current_block.size());
                        sampled_blocks.push_back(move(current_block));
                } else {
                        lengths[slot_for_current_block] = current_block.size();
                        sampled_blocks[slot_for_current_block] = move(current_block);
                }
        }
        current_block.clear();
        num_files_in_block = 0;
        ++block_num;

        if (block_num < blocks_to_keep) {
                keep_current_block = true;
                slot_for_current_block = -1;
        } else {
                // Keep every block with equal probability (reservoir sampling).
                uint64_t idx = uniform_int_distribution<uint64_t>(0, block_num)(reservoir_rand);
                keep_current_block = (idx < blocks_to_keep);
                slot_for_current_block = idx;
        }
}

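// Concatenate the sampled blocks and let zstd train a dictionary of at most
// buf_size bytes from them. Returns an empty string if training fails.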
string DictionaryBuilder::train(size_t buf_size)
{
        string dictionary_buf;
        sort(sampled_blocks.begin(), sampled_blocks.end());  // Seemingly important for decompression speed.
        for (const string &block : sampled_blocks) {
                dictionary_buf += block;
        }

        string buf;
        buf.resize(buf_size);
        size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
        if (ZDICT_isError(ret)) {
                return "";
        }
        dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);
        buf.resize(ret);

        sampled_blocks.clear();
        lengths.clear();

        return buf;
}

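// Receives filenames through add_file(), groups them into blocks of
// block_size files, zstd-compresses each block to outfp, and keeps one
// PostingListBuilder per seen trigram (the inverted index). Optionally
// also streams out compressed directory modification times for updatedb.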
class EncodingCorpus : public DatabaseReceiver {
public:
        EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times);
        ~EncodingCorpus();

        void add_file(std::string filename, dir_time dt) override;
        void flush_block() override;
        void finish() override;

        std::vector<uint64_t> filename_blocks;
        size_t num_files = 0, num_files_in_block = 0, num_blocks = 0;
        bool seen_trigram(uint32_t trgm)
        {
                return invindex[trgm] != nullptr;
        }
        size_t num_files_seen() const override { return num_files; }
        PostingListBuilder &get_pl_builder(uint32_t trgm)
        {
                if (invindex[trgm] == nullptr) {
                        invindex[trgm] = new PostingListBuilder;
                }
                return *invindex[trgm];
        }

        size_t num_trigrams() const;
        std::string get_compressed_dir_times();

private:
        void compress_dir_times(size_t allowed_slop);

        std::unique_ptr<PostingListBuilder *[]> invindex;
        FILE *outfp;
        std::string current_block;
        std::string tempbuf;
        const size_t block_size;
        const bool store_dir_times;
        ZSTD_CDict *cdict;

        ZSTD_CStream *dir_time_ctx = nullptr;
        std::string dir_times;  // Buffer of still-uncompressed data.
        std::string dir_times_compressed;
};


EncodingCorpus::EncodingCorpus(FILE *outfp, size_t block_size, ZSTD_CDict *cdict, bool store_dir_times)
        : invindex(new PostingListBuilder *[NUM_TRIGRAMS]), outfp(outfp), block_size(block_size), store_dir_times(store_dir_times), cdict(cdict)
{
        fill(invindex.get(), invindex.get() + NUM_TRIGRAMS, nullptr);
        if (store_dir_times) {
                dir_time_ctx = ZSTD_createCStream();
                ZSTD_initCStream(dir_time_ctx, /*level=*/6);
        }
}

EncodingCorpus::~EncodingCorpus()
{
        for (unsigned i = 0; i < NUM_TRIGRAMS; ++i) {
                delete invindex[i];
        }
}

void EncodingCorpus::add_file(string filename, dir_time dt)
{
        ++num_files;
        if (!current_block.empty()) {
                current_block.push_back('\0');
        }
        current_block += filename;
        if (++num_files_in_block == block_size) {
                flush_block();
        }

        if (store_dir_times) {
                if (dt.sec == -1) {
                        // Not a directory.
                        dir_times.push_back('\0');
                } else {
                        dir_times.push_back('\1');
                        dir_times.append(reinterpret_cast<char *>(&dt.sec), sizeof(dt.sec));
                        dir_times.append(reinterpret_cast<char *>(&dt.nsec), sizeof(dt.nsec));
                }
                compress_dir_times(/*allowed_slop=*/4096);
        }
}

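// Incrementally compress the directory-time buffer, leaving at most
// allowed_slop bytes uncompressed. Called with allowed_slop=0 to flush
// everything before the stream is finished.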
void EncodingCorpus::compress_dir_times(size_t allowed_slop)
{
        while (dir_times.size() >= allowed_slop) {
                size_t old_size = dir_times_compressed.size();
                dir_times_compressed.resize(old_size + 4096);

                ZSTD_outBuffer outbuf;
                outbuf.dst = dir_times_compressed.data() + old_size;
                outbuf.size = 4096;
                outbuf.pos = 0;

                ZSTD_inBuffer inbuf;
                inbuf.src = dir_times.data();
                inbuf.size = dir_times.size();
                inbuf.pos = 0;

                int ret = ZSTD_compressStream(dir_time_ctx, &outbuf, &inbuf);
                if (ret < 0) {
                        fprintf(stderr, "ZSTD_compressStream() failed\n");
                        exit(1);
                }

                dir_times_compressed.resize(old_size + outbuf.pos);
                dir_times.erase(dir_times.begin(), dir_times.begin() + inbuf.pos);

                if (outbuf.pos == 0 && inbuf.pos == 0) {
                        // Nothing happened (not enough data?), try again later.
                        return;
                }
        }
}

void EncodingCorpus::flush_block()
{
        if (current_block.empty()) {
                return;
        }

        uint32_t docid = num_blocks;

        // Create trigrams.
        const char *ptr = current_block.c_str();
        while (ptr < current_block.c_str() + current_block.size()) {
                string_view s(ptr);
                if (s.size() >= 3) {
                        for (size_t j = 0; j < s.size() - 2; ++j) {
                                uint32_t trgm = read_trigram(s, j);
                                get_pl_builder(trgm).add_docid(docid);
                        }
                }
                ptr += s.size() + 1;
        }

        // Compress and add the filename block.
        filename_blocks.push_back(ftell(outfp));
        string compressed = zstd_compress(current_block, cdict, &tempbuf);
        if (fwrite(compressed.data(), compressed.size(), 1, outfp) != 1) {
                perror("fwrite()");
                exit(1);
        }

        current_block.clear();
        num_files_in_block = 0;
        ++num_blocks;
}

void EncodingCorpus::finish()
{
        flush_block();
}

size_t EncodingCorpus::num_trigrams() const
{
        size_t num = 0;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (invindex[trgm] != nullptr) {
                        ++num;
                }
        }
        return num;
}

string EncodingCorpus::get_compressed_dir_times()
{
        if (!store_dir_times) {
                return "";
        }
        compress_dir_times(/*allowed_slop=*/0);
        assert(dir_times.empty());

        for (;;) {
                size_t old_size = dir_times_compressed.size();
                dir_times_compressed.resize(old_size + 4096);

                ZSTD_outBuffer outbuf;
                outbuf.dst = dir_times_compressed.data() + old_size;
                outbuf.size = 4096;
                outbuf.pos = 0;

                int ret = ZSTD_endStream(dir_time_ctx, &outbuf);
                if (ret < 0) {
                        fprintf(stderr, "ZSTD_endStream() failed\n");
                        exit(1);
                }

                dir_times_compressed.resize(old_size + outbuf.pos);

                if (ret == 0) {
                        // All done.
                        break;
                }
        }

        return dir_times_compressed;
}

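// Compress src with the given dictionary (or without one, if cdict is
// nullptr), reusing a lazily created compression context and the
// caller-provided scratch buffer to avoid repeated allocations.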
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
{
        static ZSTD_CCtx *ctx = nullptr;
        if (ctx == nullptr) {
                ctx = ZSTD_createCCtx();
        }

        size_t max_size = ZSTD_compressBound(src.size());
        if (tempbuf->size() < max_size) {
                tempbuf->resize(max_size);
        }
        size_t size;
        if (cdict == nullptr) {
                size = ZSTD_compressCCtx(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), /*level=*/6);
        } else {
                size = ZSTD_compress_usingCDict(ctx, &(*tempbuf)[0], max_size, src.data(), src.size(), cdict);
        }
        return string(tempbuf->data(), size);
}

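// Simple trial division. (It reports 2 and 3 as composite, but in this file
// it is only used via next_prime() to pick a hash table size, where returning
// a slightly larger prime instead does no harm.)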
bool is_prime(uint32_t x)
{
        if ((x % 2) == 0 || (x % 3) == 0) {
                return false;
        }
        uint32_t limit = ceil(sqrt(x));
        for (uint32_t factor = 5; factor <= limit; ++factor) {
                if ((x % factor) == 0) {
                        return false;
                }
        }
        return true;
}

uint32_t next_prime(uint32_t x)
{
        if ((x % 2) == 0) {
                ++x;
        }
        while (!is_prime(x)) {
                x += 2;
        }
        return x;
}

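// Build an open-addressing hash table over all seen trigrams, using Robin
// Hood probing to keep probe sequences short. Returns nullptr if any entry
// would need more than num_overflow_slots probes, so the caller can retry
// with a larger table.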
unique_ptr<Trigram[]> create_hashtable(EncodingCorpus &corpus, const vector<uint32_t> &all_trigrams, uint32_t ht_size, uint32_t num_overflow_slots)
{
        unique_ptr<Trigram[]> ht(new Trigram[ht_size + num_overflow_slots + 1]);  // 1 for the sentinel element at the end.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                ht[i].trgm = uint32_t(-1);
                ht[i].num_docids = 0;
                ht[i].offset = 0;
        }
        for (uint32_t trgm : all_trigrams) {
                // We don't know offset yet, so set it to zero.
                Trigram to_insert{ trgm, uint32_t(corpus.get_pl_builder(trgm).num_docids), 0 };

                uint32_t bucket = hash_trigram(trgm, ht_size);
                unsigned distance = 0;
                while (ht[bucket].num_docids != 0) {
                        // Robin Hood hashing; reduces the longest distance by a lot.
                        unsigned other_distance = bucket - hash_trigram(ht[bucket].trgm, ht_size);
                        if (distance > other_distance) {
                                swap(to_insert, ht[bucket]);
                                distance = other_distance;
                        }

                        ++bucket, ++distance;
                        if (distance > num_overflow_slots) {
                                return nullptr;
                        }
                }
                ht[bucket] = to_insert;
        }
        return ht;
}

DatabaseBuilder::DatabaseBuilder(const char *outfile, gid_t owner, int block_size, string dictionary, bool check_visibility)
        : outfile(outfile), block_size(block_size)
{
        umask(0027);

        string path = outfile;
        path.resize(path.find_last_of('/') + 1);
        if (path.empty()) {
                path = ".";
        }
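        // Create the file invisibly with O_TMPFILE where available, so that a
        // half-written database never appears under its final name; it is
        // linked into place in finish_corpus(). Otherwise, fall back to a
        // mkstemp() temporary that is renamed over the target at the end.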
#ifdef O_TMPFILE
        int fd = open(path.c_str(), O_WRONLY | O_TMPFILE, 0640);
        if (fd == -1) {
                perror(path.c_str());
                exit(1);
        }
#else
        temp_filename = string(outfile) + ".XXXXXX";
        int fd = mkstemp(&temp_filename[0]);
        if (fd == -1) {
                perror(temp_filename.c_str());
                exit(1);
        }
        if (fchmod(fd, 0640) == -1) {
                perror("fchmod");
                exit(1);
        }
#endif

        if (owner != (gid_t)-1) {
                if (fchown(fd, (uid_t)-1, owner) == -1) {
                        perror("fchown");
                        exit(1);
                }
        }

        outfp = fdopen(fd, "wb");
        if (outfp == nullptr) {
                perror(outfile);
                exit(1);
        }

        // Write the header.
        memcpy(hdr.magic, "\0plocate", 8);
        hdr.version = -1;  // Mark as broken.
        hdr.hashtable_size = 0;  // Not known yet.
        hdr.extra_ht_slots = num_overflow_slots;
        hdr.num_docids = 0;
        hdr.hash_table_offset_bytes = -1;  // We don't know these offsets yet.
        hdr.max_version = 2;
        hdr.filename_index_offset_bytes = -1;
        hdr.zstd_dictionary_length_bytes = -1;
        hdr.check_visibility = check_visibility;
        fwrite(&hdr, sizeof(hdr), 1, outfp);

        if (dictionary.empty()) {
                hdr.zstd_dictionary_offset_bytes = 0;
                hdr.zstd_dictionary_length_bytes = 0;
        } else {
                hdr.zstd_dictionary_offset_bytes = ftell(outfp);
                fwrite(dictionary.data(), dictionary.size(), 1, outfp);
                hdr.zstd_dictionary_length_bytes = dictionary.size();
                cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);
        }

        hdr.directory_data_length_bytes = 0;
        hdr.directory_data_offset_bytes = 0;
        hdr.next_zstd_dictionary_length_bytes = 0;
        hdr.next_zstd_dictionary_offset_bytes = 0;
        hdr.conf_block_length_bytes = 0;
        hdr.conf_block_offset_bytes = 0;
}

DatabaseReceiver *DatabaseBuilder::start_corpus(bool store_dir_times)
{
        corpus_start = steady_clock::now();
        corpus = new EncodingCorpus(outfp, block_size, cdict, store_dir_times);
        return corpus;
}

void DatabaseBuilder::set_next_dictionary(std::string next_dictionary)
{
        this->next_dictionary = move(next_dictionary);
}

void DatabaseBuilder::set_conf_block(std::string conf_block)
{
        this->conf_block = move(conf_block);
}

void DatabaseBuilder::finish_corpus()
{
        corpus->finish();
        hdr.num_docids = corpus->filename_blocks.size();

        // Stick an empty block at the end as sentinel.
        corpus->filename_blocks.push_back(ftell(outfp));
        const size_t bytes_for_filenames = corpus->filename_blocks.back() - corpus->filename_blocks.front();

        // Write the offsets to the filenames.
        hdr.filename_index_offset_bytes = ftell(outfp);
        const size_t bytes_for_filename_index = corpus->filename_blocks.size() * sizeof(uint64_t);
        fwrite(corpus->filename_blocks.data(), corpus->filename_blocks.size(), sizeof(uint64_t), outfp);
        corpus->filename_blocks.clear();
        corpus->filename_blocks.shrink_to_fit();

        // Finish up encoding the posting lists.
        size_t trigrams = 0, longest_posting_list = 0;
        size_t bytes_for_posting_lists = 0;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (!corpus->seen_trigram(trgm))
                        continue;
                PostingListBuilder &pl_builder = corpus->get_pl_builder(trgm);
                pl_builder.finish();
                longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
                trigrams += pl_builder.num_docids;
                bytes_for_posting_lists += pl_builder.encoded.size();
        }
        size_t num_trigrams = corpus->num_trigrams();
        dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
                corpus->num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
        dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));

        dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - corpus_start).count());

        // Find the used trigrams.
        vector<uint32_t> all_trigrams;
        for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
                if (corpus->seen_trigram(trgm)) {
                        all_trigrams.push_back(trgm);
                }
        }

        // Create the hash table.
        unique_ptr<Trigram[]> hashtable;
        uint32_t ht_size = next_prime(all_trigrams.size());
        for (;;) {
                hashtable = create_hashtable(*corpus, all_trigrams, ht_size, num_overflow_slots);
                if (hashtable == nullptr) {
                        dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
                        ht_size = next_prime(ht_size * 1.05);
                } else {
                        dprintf("Created hash table of size %u.\n\n", ht_size);
                        break;
                }
        }

        // Find the offsets for each posting list.
        size_t bytes_for_hashtable = (ht_size + num_overflow_slots + 1) * sizeof(Trigram);
        uint64_t offset = ftell(outfp) + bytes_for_hashtable;
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                hashtable[i].offset = offset;  // Needs to be there even for empty slots.
                if (hashtable[i].num_docids == 0) {
                        continue;
                }

                const string &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
                offset += encoded.size();
        }

        // Write the hash table.
        hdr.hash_table_offset_bytes = ftell(outfp);
        hdr.hashtable_size = ht_size;
        fwrite(hashtable.get(), ht_size + num_overflow_slots + 1, sizeof(Trigram), outfp);

        // Write the actual posting lists.
        for (unsigned i = 0; i < ht_size + num_overflow_slots + 1; ++i) {
                if (hashtable[i].num_docids == 0) {
                        continue;
                }
                const string &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
                fwrite(encoded.data(), encoded.size(), 1, outfp);
        }

        // Finally, write the directory times (for updatedb).
        string compressed_dir_times = corpus->get_compressed_dir_times();
        size_t bytes_for_compressed_dir_times = 0;
        if (!compressed_dir_times.empty()) {
                hdr.directory_data_offset_bytes = ftell(outfp);
                hdr.directory_data_length_bytes = compressed_dir_times.size();
                fwrite(compressed_dir_times.data(), compressed_dir_times.size(), 1, outfp);
                bytes_for_compressed_dir_times = compressed_dir_times.size();
                compressed_dir_times.clear();
        }

        // Write the recommended dictionary for next update.
        if (!next_dictionary.empty()) {
                hdr.next_zstd_dictionary_offset_bytes = ftell(outfp);
                hdr.next_zstd_dictionary_length_bytes = next_dictionary.size();
                fwrite(next_dictionary.data(), next_dictionary.size(), 1, outfp);
        }

        // And the configuration block.
        if (!conf_block.empty()) {
                hdr.conf_block_offset_bytes = ftell(outfp);
                hdr.conf_block_length_bytes = conf_block.size();
                fwrite(conf_block.data(), conf_block.size(), 1, outfp);
        }

        // Rewind, and write the updated header.
        hdr.version = 1;
        fseek(outfp, 0, SEEK_SET);
        fwrite(&hdr, sizeof(hdr), 1, outfp);

#ifdef O_TMPFILE
        // Give the file a proper name, making it visible in the file system.
        // TODO: It would be nice to be able to do this atomically, like with rename.
        unlink(outfile.c_str());
        char procpath[256];
        snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fileno(outfp));
        if (linkat(AT_FDCWD, procpath, AT_FDCWD, outfile.c_str(), AT_SYMLINK_FOLLOW) == -1) {
                perror("linkat");
                exit(1);
        }
#else
        if (rename(temp_filename.c_str(), outfile.c_str()) == -1) {
                perror("rename");
                exit(1);
        }
#endif

        fclose(outfp);

        size_t total_bytes = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames + bytes_for_compressed_dir_times);

        dprintf("Block size:     %7d files\n", block_size);
        dprintf("Dictionary:     %'7.1f MB\n", hdr.zstd_dictionary_length_bytes / 1048576.0);
        dprintf("Hash table:     %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
        dprintf("Posting lists:  %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
        dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
        dprintf("Filenames:      %'7.1f MB\n", bytes_for_filenames / 1048576.0);
        if (bytes_for_compressed_dir_times != 0) {
                dprintf("Modify times:   %'7.1f MB\n", bytes_for_compressed_dir_times / 1048576.0);
        }
        dprintf("Total:          %'7.1f MB\n", total_bytes / 1048576.0);
        dprintf("\n");
}