From 756a91677c1565b1bd555722ea795f911106104f Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Sat, 13 Dec 2014 16:40:15 +0100
Subject: [PATCH] Redo mapping entirely; now dumps constant-sized shards for
 reducing.

Much more consistent RAM usage, much less I/O since we pipe from
pgn-extract.
---
 binloader.cpp | 167 ++++++++++++++++++++++++++++++--------------------
 build-book.sh |  31 +++++-----
 2 files changed, 118 insertions(+), 80 deletions(-)

diff --git a/binloader.cpp b/binloader.cpp
index 4408885..2ed60cb 100644
--- a/binloader.cpp
+++ b/binloader.cpp
@@ -1,6 +1,6 @@
 //#define _GLIBCXX_PARALLEL
 
-// Usage: ./binloader IN1 IN2 IN3 ... OUT NUM_BUCKETS
+// Usage: ./binloader IN1 IN2 IN3 ... OUT NUM_BUCKETS NUM_POS_PER_SUBSHARD
 
 #include 
 #include 
@@ -45,27 +45,107 @@ struct Element {
 	}
 };
 
+struct ShardData {
+	vector<Element> elems;
+	unique_ptr<Arena> arena;  // Used to allocate bpfen.
+	int num_written_subshards = 0;
+};
+
+void write_subshard(const char *basename, ShardData* shard, int bucket)
+{
+	string buf;  // Keep allocated.
+	char filename[256];
+	snprintf(filename, sizeof(filename), "%s.part%04d.subshard%04d",
+	         basename, bucket, shard->num_written_subshards++);
+	printf("Writing SSTable %s...\n", filename);
+
+	sort(shard->elems.begin(), shard->elems.end());
+
+	mtbl_writer_options* wopt = mtbl_writer_options_init();
+	mtbl_writer_options_set_compression(wopt, MTBL_COMPRESSION_SNAPPY);
+	mtbl_writer* mtbl = mtbl_writer_init(filename, wopt);
+	Count c;
+	unordered_set<string> moves;
+	for (size_t i = 0; i < shard->elems.size(); ++i) {
+		const Element &e = shard->elems[i];
+		if (e.result == WHITE) {
+			c.set_white(c.white() + 1);
+		} else if (e.result == DRAW) {
+			c.set_draw(c.draw() + 1);
+		} else if (e.result == BLACK) {
+			c.set_black(c.black() + 1);
+		}
+		if (e.white_elo >= 100 && e.black_elo >= 100) {
+			c.set_sum_white_elo(c.sum_white_elo() + e.white_elo);
+			c.set_sum_black_elo(c.sum_black_elo() + e.black_elo);
+			c.set_num_elo(c.num_elo() + 1);
+		}
+		if (!c.has_first_timestamp() || e.timestamp < c.first_timestamp()) {
+			if (e.timestamp != DUMMY_TIMESTAMP) {
+				c.set_first_timestamp(e.timestamp);
+			}
+			c.set_opening_num(e.opening_num);
+			c.set_pgn_file_num(e.file_num);
+			c.set_pgn_start_position(e.start_position);
+		}
+		if (!moves.count(e.move)) {
+			moves.insert(e.move);
+			c.add_move(e.move);
+		}
+		if (i == shard->elems.size() - 1 ||
+		    e.bpfen_len != shard->elems[i + 1].bpfen_len ||
+		    memcmp(e.bpfen, shard->elems[i + 1].bpfen, e.bpfen_len) != 0) {
+			c.SerializeToString(&buf);
+			mtbl_writer_add(mtbl,
+				(const uint8_t *)e.bpfen, e.bpfen_len,
+				(const uint8_t *)buf.data(), buf.size());
+			c = Count();
+			moves.clear();
+		}
+	}
+	mtbl_writer_destroy(&mtbl);
+
+	shard->elems.clear();
+	shard->arena.reset(new Arena);
+}
+
 int main(int argc, char **argv)
 {
-	int num_buckets = atoi(argv[argc - 1]);
+	int num_buckets = atoi(argv[argc - 2]);
+	size_t num_pos_per_subshard = atoi(argv[argc - 1]);  // 500000 is a reasonable value.
 
-	vector<vector<Element>> elems;
-	elems.resize(num_buckets);
+	vector<ShardData> shards;
+	shards.resize(num_buckets);
+
+	for (int i = 0; i < num_buckets; ++i) {
+		shards[i].elems.reserve(num_pos_per_subshard);
+		shards[i].arena.reset(new Arena);
+	}
 
 	size_t num_elems = 0;
-	for (int i = 1; i < argc - 2; ++i) {
-		FILE *fp = fopen(argv[i], "rb");
-		if (fp == NULL) {
-			perror(argv[i]);
-			exit(1);
+	for (int i = 1; i < argc - 3; ++i) {
+		FILE *fp;
+		if (strcmp(argv[i], "-") == 0) {
+			fp = stdin;
+		} else {
+			fp = fopen(argv[i], "rb");
+			if (fp == NULL) {
+				perror(argv[i]);
+				exit(1);
+			}
 		}
 		for ( ;; ) {
+			char bpfen[256];
+
 			int bpfen_len = getc(fp);
 			if (bpfen_len == -1) {
 				break;
 			}
-
-			char *bpfen = arena.alloc(bpfen_len);
+			if (bpfen_len >= int(sizeof(bpfen))) {
+				fprintf(stderr, "Overlong BPFEN (%d bytes)\n", bpfen_len);
+				// exit(1);
+				break;
+			}
 			if (fread(bpfen, bpfen_len, 1, fp) != 1) {
 				perror("fread()");
 				// exit(1);
@@ -133,7 +213,8 @@ int main(int argc, char **argv)
 			int bucket = hash_key_to_bucket(bpfen, bpfen_len, num_buckets);
 
 			Element e;
-			e.bpfen = bpfen;
+			e.bpfen = shards[bucket].arena->alloc(bpfen_len);
+			memcpy(e.bpfen, bpfen, bpfen_len);
 			e.bpfen_len = bpfen_len;
 			strcpy(e.move, move);
 			e.result = Result(r);
@@ -143,67 +224,21 @@ int main(int argc, char **argv)
 			e.file_num = file_num;
 			e.timestamp = timestamp;
 			e.start_position = start_position;
-			elems[bucket].push_back(e);
+			shards[bucket].elems.push_back(e);
 			++num_elems;
+
+			if (shards[bucket].elems.size() >= num_pos_per_subshard) {
+				write_subshard(argv[argc - 3], &shards[bucket], bucket);
+				shards[bucket].elems.reserve(num_pos_per_subshard);
+			}
 		}
 		fclose(fp);
 		printf("Read %ld elems\n", num_elems);
 	}
 
-	printf("Sorting...\n");
-	for (int i = 0; i < num_buckets; ++i) {
-		sort(elems[i].begin(), elems[i].end());
-	}
-
-	printf("Writing SSTables...\n");
-	string buf;  // Keep allocated.
 	for (int i = 0; i < num_buckets; ++i) {
-		char filename[256];
-		snprintf(filename, sizeof(filename), "%s.part%04d", argv[argc - 2], i);
-
-		mtbl_writer_options* wopt = mtbl_writer_options_init();
-		mtbl_writer_options_set_compression(wopt, MTBL_COMPRESSION_SNAPPY);
-		mtbl_writer* mtbl = mtbl_writer_init(filename, wopt);
-		Count c;
-		unordered_set<string> moves;
-		for (size_t j = 0; j < elems[i].size(); ++j) {
-			const Element &e = elems[i][j];
-			if (e.result == WHITE) {
-				c.set_white(c.white() + 1);
-			} else if (e.result == DRAW) {
-				c.set_draw(c.draw() + 1);
-			} else if (e.result == BLACK) {
-				c.set_black(c.black() + 1);
-			}
-			if (e.white_elo >= 100 && e.black_elo >= 100) {
-				c.set_sum_white_elo(c.sum_white_elo() + e.white_elo);
-				c.set_sum_black_elo(c.sum_black_elo() + e.black_elo);
-				c.set_num_elo(c.num_elo() + 1);
-			}
-			if (!c.has_first_timestamp() || e.timestamp < c.first_timestamp()) {
-				if (e.timestamp != DUMMY_TIMESTAMP) {
-					c.set_first_timestamp(e.timestamp);
-				}
-				c.set_opening_num(e.opening_num);
-				c.set_pgn_file_num(e.file_num);
-				c.set_pgn_start_position(e.start_position);
-			}
-			if (!moves.count(e.move)) {
-				moves.insert(e.move);
-				c.add_move(e.move);
-			}
-			if (j == elems[i].size() - 1 ||
-			    e.bpfen_len != elems[i][j + 1].bpfen_len ||
-			    memcmp(e.bpfen, elems[i][j + 1].bpfen, e.bpfen_len) != 0) {
-				c.SerializeToString(&buf);
-				mtbl_writer_add(mtbl,
-					(const uint8_t *)e.bpfen, e.bpfen_len,
-					(const uint8_t *)buf.data(), buf.size());
-				c = Count();
-				moves.clear();
-			}
-		}
-		mtbl_writer_destroy(&mtbl);
+		write_subshard(argv[argc - 3], &shards[i], i);
 	}
 }
+
diff --git a/build-book.sh b/build-book.sh
index 60d5710..67181c6 100755
--- a/build-book.sh
+++ b/build-book.sh
@@ -3,29 +3,32 @@
 set -e
 
 export SHARDS=40
-export PARALLEL_LOADS=15  # Reduce if you have problems with OOM
 export PARALLEL_MERGES=40
 
-rm -f pgnnames.txt part-*.bin part-*.mtbl part-*.mtbl.part???? open.mtbl.new open.mtbl.part???? open.mtbl.part????.new 2>/dev/null
+rm -f pgnnames.txt tmp.mtbl* open.mtbl.new open.mtbl.part???? open.mtbl.part????.new 2>/dev/null
 
-PGNNUM=0
 for FILE in "$@"; do
-	date | tr -d "\n"
-	echo " $FILE"
-	./parallel-parse-pgn.sh "$FILE" "$PGNNUM"
 	echo "$FILE" >> pgnnames.txt
-	PGNNUM=$(( PGNNUM + 1 ))
 done
 
-date
-echo "Loading..."
-parallel -i -j ${PARALLEL_LOADS} bash -c 'FILE="{}"; set -x; ./binloader "$FILE" "${FILE/bin/mtbl}" $SHARDS' -- part-*.bin
-
-rm -f part-*.bin
+for X in $( seq 0 39 ); do
+	(
+		(
+			PGNNUM=0
+			for FILE in "$@"; do
+				START=$( ./find-pgn-split-point.sh "$FILE" $X 40 )
+				END=$( ./find-pgn-split-point.sh "$FILE" $(( X + 1 )) 40 )
+				~/nmu/pgn-extract/pgn-extract --startpos $START --endpos $END --startfilenum $PGNNUM -e -Wsessebin "$FILE" 2>/dev/null
+				PGNNUM=$(( PGNNUM + 1 ))
+			done
+		) | ./binloader - tmp.mtbl.mapper$X $SHARDS 500000
+	) &
+done
+wait
 
-parallel -i -j ${PARALLEL_MERGES} bash -c 'X={}; set -x; ./binmerger part-*.mtbl.part$( printf %04d $X ) open.mtbl.part$( printf %04d $X ).new' -- $( seq 0 $(( SHARDS - 1 )) )
+parallel -i -j ${PARALLEL_MERGES} bash -c 'X={}; set -x; ./binmerger tmp.mtbl.mapper*.part$( printf %04d $X ).subshard* open.mtbl.part$( printf %04d $X ).new' -- $( seq 0 $(( SHARDS - 1 )) )
 
 for X in $( seq 0 $(( SHARDS - 1 )) ); do
 	mv open.mtbl.part$( printf %04d $X ).new open.mtbl.part$( printf %04d $X )
 done
 
-rm -f part-*.mtbl.part????
+rm -f tmp.mtbl*
-- 
2.39.2
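
Note (not part of the patch): the mapper's shard assignment goes through
hash_key_to_bucket(), which is defined elsewhere in binloader.cpp and not
touched by this diff. A minimal sketch of what such a function could look
like, assuming a std::hash-based implementation (hypothetical; the real
code may hash differently):

	#include <cstddef>
	#include <functional>
	#include <string>

	// Hypothetical sketch, not the actual implementation: map a binary
	// BPFEN key deterministically to one of num_buckets shards.
	int hash_key_to_bucket(const char *key, size_t len, int num_buckets)
	{
		size_t h = std::hash<std::string>()(std::string(key, len));
		return int(h % num_buckets);
	}

The property the pipeline relies on is determinism across processes: all 40
parallel mappers must send identical positions to the same bucket, so that
binmerger can reduce each open.mtbl.part???? independently of the others.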