string buf;
buf.resize(buf_size);
size_t ret = ZDICT_trainFromBuffer(&buf[0], buf_size, dictionary_buf.data(), lengths.data(), lengths.size());
+ if (ret == size_t(-1)) {
+ return "";
+ }
dprintf("Sampled %zu bytes in %zu blocks, built a dictionary of size %zu\n", dictionary_buf.size(), lengths.size(), ret);
buf.resize(ret);
}
}
-void read_mlocate(const char *filename, DatabaseReceiver *receiver)
+// Reads newline-separated filenames from fp (rewinding it first) and feeds
+// each line, minus its trailing newline, to receiver->add_file().
+void read_plaintext(FILE *fp, DatabaseReceiver *receiver)
{
- FILE *fp = fopen(filename, "rb");
- if (fp == nullptr) {
- perror(filename);
+ // The caller may already have consumed this stream in an earlier pass
+ // (e.g. dictionary training), so rewind before reading.
+ if (fseek(fp, 0, SEEK_SET) != 0) {
+ perror("fseek");
+ exit(1);
+ }
+
+ while (!feof(fp)) {
+ char buf[1024];
+ if (fgets(buf, sizeof(buf), fp) == nullptr) {
+ break;
+ }
+ string s(buf);
+ assert(!s.empty());
+ // Lines longer than the buffer arrive in pieces; keep appending
+ // until the terminating newline (or EOF) shows up.
+ while (s.back() != '\n' && !feof(fp)) {
+ // The string was longer than the buffer, so read again.
+ if (fgets(buf, sizeof(buf), fp) == nullptr) {
+ break;
+ }
+ s += buf;
+ }
+ // Strip the newline; the final line may legally lack one.
+ if (!s.empty() && s.back() == '\n')
+ s.pop_back();
+ receiver->add_file(move(s));
+ }
+}
+
+// Reads an mlocate database from fp (rewinding it first) and feeds every
+// directory's contents to receiver via handle_directory().
+void read_mlocate(FILE *fp, DatabaseReceiver *receiver)
+{
+ // The stream may have been consumed by an earlier pass; start over.
+ if (fseek(fp, 0, SEEK_SET) != 0) {
+ perror("fseek");
 exit(1);
 }
 while (!feof(fp)) {
 handle_directory(fp, receiver);
 }
- fclose(fp);
}
string zstd_compress(const string &src, ZSTD_CDict *cdict, string *tempbuf)
return ht;
}
-void do_build(const char *infile, const char *outfile, int block_size)
-{
- steady_clock::time_point start __attribute__((unused)) = steady_clock::now();
+// Owns the output file and header while a plocate database is being written.
+// Lifecycle: construct (opens outfile, writes placeholder header and the
+// optional dictionary), start_corpus(), feed files into the returned Corpus,
+// then finish_corpus() to write the indexes and close the file.
+class DatabaseBuilder {
+public:
+ DatabaseBuilder(const char *outfile, int block_size, string dictionary);
+ Corpus *start_corpus();
+ void finish_corpus();
+
+private:
+ FILE *outfp;
+ Header hdr;
+ const int block_size;
+ steady_clock::time_point corpus_start; // set by start_corpus(); used for timing output
+ Corpus *corpus = nullptr; // owned; created by start_corpus()
+ ZSTD_CDict *cdict = nullptr; // compression dictionary; stays nullptr if none given
+};
+// Opens outfile and writes a placeholder header (version -1 = "broken", so a
+// crash mid-build never leaves a file that parses as valid), followed by the
+// zstd dictionary if one was supplied.
+DatabaseBuilder::DatabaseBuilder(const char *outfile, int block_size, string dictionary)
+ : block_size(block_size)
+{
 umask(0027);
- FILE *outfp = fopen(outfile, "wb");
+ outfp = fopen(outfile, "wb");
+ if (outfp == nullptr) {
+ perror(outfile);
+ exit(1);
+ }
 // Write the header.
- Header hdr;
 memcpy(hdr.magic, "\0plocate", 8);
 hdr.version = -1; // Mark as broken.
 hdr.hashtable_size = 0; // Not known yet.
 hdr.zstd_dictionary_length_bytes = -1;
 fwrite(&hdr, sizeof(hdr), 1, outfp);
- // Train the dictionary by sampling real blocks.
- // The documentation for ZDICT_trainFromBuffer() claims that a reasonable
- // dictionary size is ~100 kB, but 1 kB seems to actually compress better for us,
- // and decompress just as fast.
- DictionaryBuilder builder(/*blocks_to_keep=*/1000, block_size);
- read_mlocate(infile, &builder);
- string dictionary = builder.train(1024);
- ZSTD_CDict *cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);
-
- hdr.zstd_dictionary_offset_bytes = ftell(outfp);
- fwrite(dictionary.data(), dictionary.size(), 1, outfp);
- hdr.zstd_dictionary_length_bytes = dictionary.size();
-
- Corpus corpus(outfp, block_size, cdict);
- read_mlocate(infile, &corpus);
- if (false) { // To read a plain text file.
- FILE *fp = fopen(infile, "r");
- while (!feof(fp)) {
- char buf[1024];
- if (fgets(buf, 1024, fp) == nullptr || feof(fp)) {
- break;
- }
- string s(buf);
- if (s.back() == '\n')
- s.pop_back();
- corpus.add_file(move(s));
- }
- fclose(fp);
+ // An empty dictionary (e.g. if training failed) is valid: offset/length 0
+ // means "no dictionary", and cdict stays nullptr so blocks compress plain.
+ if (dictionary.empty()) {
+ hdr.zstd_dictionary_offset_bytes = 0;
+ hdr.zstd_dictionary_length_bytes = 0;
+ } else {
+ hdr.zstd_dictionary_offset_bytes = ftell(outfp);
+ fwrite(dictionary.data(), dictionary.size(), 1, outfp);
+ hdr.zstd_dictionary_length_bytes = dictionary.size();
+ cdict = ZSTD_createCDict(dictionary.data(), dictionary.size(), /*level=*/6);
 }
- corpus.flush_block();
- dprintf("Read %zu files from %s\n", corpus.num_files, infile);
- hdr.num_docids = corpus.filename_blocks.size();
+}
+
+// Begins a new corpus. The returned Corpus remains owned by this builder;
+// the caller feeds files into it and then calls finish_corpus().
+Corpus *DatabaseBuilder::start_corpus()
+{
+ corpus_start = steady_clock::now();
+ corpus = new Corpus(outfp, block_size, cdict);
+ return corpus;
+}
+
+// Flushes the corpus and writes the remaining database sections (filename
+// index, posting lists, trigram hash table) and the final header.
+// NOTE(review): this hunk has elided context (e.g. between the hash-table
+// retry loop and the encoded-list writes); the loops below are fragments.
+void DatabaseBuilder::finish_corpus()
+{
+ corpus->flush_block();
+ hdr.num_docids = corpus->filename_blocks.size();
 // Stick an empty block at the end as sentinel.
- corpus.filename_blocks.push_back(ftell(outfp));
- const size_t bytes_for_filenames = corpus.filename_blocks.back() - corpus.filename_blocks.front();
+ corpus->filename_blocks.push_back(ftell(outfp));
+ const size_t bytes_for_filenames = corpus->filename_blocks.back() - corpus->filename_blocks.front();
 // Write the offsets to the filenames.
 hdr.filename_index_offset_bytes = ftell(outfp);
- const size_t bytes_for_filename_index = corpus.filename_blocks.size() * sizeof(uint64_t);
- fwrite(corpus.filename_blocks.data(), corpus.filename_blocks.size(), sizeof(uint64_t), outfp);
- corpus.filename_blocks.clear();
- corpus.filename_blocks.shrink_to_fit();
+ const size_t bytes_for_filename_index = corpus->filename_blocks.size() * sizeof(uint64_t);
+ fwrite(corpus->filename_blocks.data(), corpus->filename_blocks.size(), sizeof(uint64_t), outfp);
+ // Free the block list early; it can be large and is no longer needed.
+ corpus->filename_blocks.clear();
+ corpus->filename_blocks.shrink_to_fit();
 // Finish up encoding the posting lists.
 size_t trigrams = 0, longest_posting_list = 0;
 size_t bytes_for_posting_lists = 0;
 for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
- if (!corpus.seen_trigram(trgm))
+ if (!corpus->seen_trigram(trgm))
 continue;
- PostingListBuilder &pl_builder = corpus.get_pl_builder(trgm);
+ PostingListBuilder &pl_builder = corpus->get_pl_builder(trgm);
 pl_builder.finish();
 longest_posting_list = max(longest_posting_list, pl_builder.num_docids);
 trigrams += pl_builder.num_docids;
 bytes_for_posting_lists += pl_builder.encoded.size();
 }
- size_t num_trigrams = corpus.num_trigrams();
+ size_t num_trigrams = corpus->num_trigrams();
 dprintf("%zu files, %zu different trigrams, %zu entries, avg len %.2f, longest %zu\n",
- corpus.num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
+ corpus->num_files, num_trigrams, trigrams, double(trigrams) / num_trigrams, longest_posting_list);
 dprintf("%zu bytes used for posting lists (%.2f bits/entry)\n", bytes_for_posting_lists, 8 * bytes_for_posting_lists / double(trigrams));
- dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - start).count());
+ dprintf("Building posting lists took %.1f ms.\n\n", 1e3 * duration<float>(steady_clock::now() - corpus_start).count());
 // Find the used trigrams.
 vector<uint32_t> all_trigrams;
 for (unsigned trgm = 0; trgm < NUM_TRIGRAMS; ++trgm) {
- if (corpus.seen_trigram(trgm)) {
+ if (corpus->seen_trigram(trgm)) {
 all_trigrams.push_back(trgm);
 }
 }
 unique_ptr<Trigram[]> hashtable;
 uint32_t ht_size = next_prime(all_trigrams.size());
 for (;;) {
- hashtable = create_hashtable(corpus, all_trigrams, ht_size, num_overflow_slots);
+ hashtable = create_hashtable(*corpus, all_trigrams, ht_size, num_overflow_slots);
 if (hashtable == nullptr) {
 dprintf("Failed creating hash table of size %u, increasing by 5%% and trying again.\n", ht_size);
 ht_size = next_prime(ht_size * 1.05);
 continue;
 }
- const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
+ const string &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
 offset += encoded.size();
 }
 if (hashtable[i].num_docids == 0) {
 continue;
 }
- const string &encoded = corpus.get_pl_builder(hashtable[i].trgm).encoded;
+ const string &encoded = corpus->get_pl_builder(hashtable[i].trgm).encoded;
 fwrite(encoded.data(), encoded.size(), 1, outfp);
 }
 fwrite(&hdr, sizeof(hdr), 1, outfp);
 fclose(outfp);
- size_t total_bytes __attribute__((unused)) = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);
+ // NOTE(review): dropping __attribute__((unused)) may reintroduce an
+ // unused-variable warning in builds where dprintf() compiles to nothing
+ // — confirm total_bytes is printed in elided context below.
+ size_t total_bytes = (bytes_for_hashtable + bytes_for_posting_lists + bytes_for_filename_index + bytes_for_filenames);
 dprintf("Block size: %7d files\n", block_size);
- dprintf("Dictionary: %'7.1f MB\n", dictionary.size() / 1048576.0);
+ dprintf("Dictionary: %'7.1f MB\n", hdr.zstd_dictionary_length_bytes / 1048576.0);
 dprintf("Hash table: %'7.1f MB\n", bytes_for_hashtable / 1048576.0);
 dprintf("Posting lists: %'7.1f MB\n", bytes_for_posting_lists / 1048576.0);
 dprintf("Filename index: %'7.1f MB\n", bytes_for_filename_index / 1048576.0);
 dprintf("\n");
 }
+// Builds a plocate database from infile into outfile. The input is either an
+// mlocate database or, if `plaintext` is set, a newline-separated file list.
+// The input is read twice: once to train the compression dictionary, once to
+// build the corpus — which is why the readers take a rewindable FILE *.
+void do_build(const char *infile, const char *outfile, int block_size, bool plaintext)
+{
+ FILE *infp = fopen(infile, "rb");
+ if (infp == nullptr) {
+ perror(infile);
+ exit(1);
+ }
+
+ // Train the dictionary by sampling real blocks.
+ // The documentation for ZDICT_trainFromBuffer() claims that a reasonable
+ // dictionary size is ~100 kB, but 1 kB seems to actually compress better for us,
+ // and decompress just as fast.
+ DictionaryBuilder builder(/*blocks_to_keep=*/1000, block_size);
+ if (plaintext) {
+ read_plaintext(infp, &builder);
+ } else {
+ read_mlocate(infp, &builder);
+ }
+ string dictionary = builder.train(1024);
+
+ // Second pass: the readers rewind infp themselves before reading.
+ DatabaseBuilder db(outfile, block_size, dictionary);
+ Corpus *corpus = db.start_corpus();
+ if (plaintext) {
+ read_plaintext(infp, corpus);
+ } else {
+ read_mlocate(infp, corpus);
+ }
+ fclose(infp);
+
+ dprintf("Read %zu files from %s\n", corpus->num_files, infile);
+ db.finish_corpus();
+}
+
// Prints command-line help to stdout. (The first lines of the message are
// outside this hunk's context.)
void usage()
{
 printf(
 "Normally, the destination should be /var/lib/mlocate/plocate.db.\n"
 "\n"
 " -b, --block-size SIZE number of filenames to store in each block (default 32)\n"
+ " -p, --plaintext input is a plaintext file, not an mlocate database\n"
 " --help print this help\n"
 " --version print version information\n");
}
// Fragment of main(): option parsing (signature and parts of the switch are
// outside this hunk's context). The diff threads the new -p/--plaintext flag
// from getopt through to do_build().
{
 static const struct option long_options[] = {
 { "block-size", required_argument, 0, 'b' },
+ { "plaintext", no_argument, 0, 'p' },
 { "help", no_argument, 0, 'h' },
 { "version", no_argument, 0, 'V' },
 { "debug", no_argument, 0, 'D' }, // Not documented.
 };
 int block_size = 32;
+ bool plaintext = false;
 setlocale(LC_ALL, "");
 for (;;) {
 int option_index = 0;
- int c = getopt_long(argc, argv, "b:hVD", long_options, &option_index);
+ // "p" added to the short-option string to match the long option above.
+ int c = getopt_long(argc, argv, "b:hpVD", long_options, &option_index);
 if (c == -1) {
 break;
 }
 case 'b':
 block_size = atoi(optarg);
 break;
+ case 'p':
+ plaintext = true;
+ break;
 case 'h':
 usage();
 exit(0);
 exit(1);
 }
- do_build(argv[optind], argv[optind + 1], block_size);
+ do_build(argv[optind], argv[optind + 1], block_size, plaintext);
 exit(EXIT_SUCCESS);
}