+
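+ // Linear scan fallback: the main thread reads compressed filename blocks
+ // off the index and feeds them through a bounded queue to a pool of worker
+ // threads, which scan each block against the needles and report matches.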
+ AccessRXCache access_rx_cache(nullptr);
+ Serializer serializer;
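+ // Read the block offset table up front; it has num_blocks + 1 entries,
+ // so that block i occupies the byte range [offsets[i], offsets[i + 1]).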
+ uint32_t num_blocks = corpus.get_num_filename_blocks();
+ unique_ptr<uint64_t[]> offsets(new uint64_t[num_blocks + 1]);
+ complete_pread(fd, offsets.get(), (num_blocks + 1) * sizeof(uint64_t), corpus.offset_for_block(0));
+ atomic<uint64_t> matched{0};
+
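+ // Producer/consumer queue. Each element is (first docid, last docid,
+ // compressed bytes for those blocks); queue_added wakes the workers,
+ // queue_removed gives backpressure to the producer.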
+ mutex mu;
+ condition_variable queue_added, queue_removed;
+ deque<tuple<uint32_t, uint32_t, string>> work_queue; // Under mu.
+ bool done = false; // Under mu.
+
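+ // Use all cores except one, since the main thread is busy doing I/O.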
+ unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
+ dprintf("Using %u worker threads for linear scan.\n", num_threads);
+ unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
+ for (unsigned i = 0; i < num_threads; ++i) {
+ threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
+ // glibc's regexec() takes a lock on the compiled regex, so for the threads
+ // to be able to match in parallel, each needs its own copy.
+ const vector<Needle> *use_needles = &needles;
+ vector<Needle> recompiled_needles;
+ if (i != 0 && patterns_are_regex) {
+ recompiled_needles = needles;
+ for (Needle &needle : recompiled_needles) {
+ needle.re = compile_regex(needle.str);
+ }
+ use_needles = &recompiled_needles;
+ }
+
+ WorkerThreadReceiver receiver(&threads[i]);
+ for (;;) {
+ uint32_t io_docid, last_docid;
+ string compressed;
+
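+ // Wait for a chunk of blocks to scan, or for the signal to shut down.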
+ {
+ unique_lock<mutex> lock(mu);
+ queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
+ if (done && work_queue.empty()) {
+ return;
+ }
+ tie(io_docid, last_docid, compressed) = move(work_queue.front());
+ work_queue.pop_front();
+ queue_removed.notify_all();
+ }
+
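+ // Scan each block in the chunk. The offsets are global file offsets,
+ // so they are rebased against the start of this chunk's buffer.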
+ for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
+ size_t relative_offset = offsets[docid] - offsets[io_docid];
+ size_t len = offsets[docid + 1] - offsets[docid];
+ scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
+ }
+ }
+ });
+ }
+
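+ // The main thread does all the I/O: read 32 blocks at a time into one
+ // buffer, then hand the buffer off to a worker (respecting the queue cap).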
+ string compressed;
+ for (uint32_t io_docid = 0; io_docid < num_blocks; io_docid += 32) {
+ uint32_t last_docid = min(io_docid + 32, num_blocks);
+ size_t io_len = offsets[last_docid] - offsets[io_docid];
+ if (compressed.size() < io_len) {
+ compressed.resize(io_len);
+ }
+ complete_pread(fd, &compressed[0], io_len, offsets[io_docid]);
+
+ {
+ unique_lock<mutex> lock(mu);
+ queue_removed.wait(lock, [&work_queue] { return work_queue.size() < 256; }); // Allow ~2MB of data queued up.
+ work_queue.emplace_back(io_docid, last_docid, move(compressed));
+ queue_added.notify_one(); // Avoid the thundering herd.
+ }
+
+ // Pick up results from one worker thread per iteration, so that the
+ // result queues do not grow without bound. (Draining all of the threads
+ // every time turned out to be slow with many threads; taking just one,
+ // round-robin, is enough.)
+ unsigned i = io_docid / 32;
+ deliver_results(&threads[i % num_threads], &serializer);
+ }
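+ // All chunks have been submitted; tell the workers to finish the queue
+ // and then exit.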
+ {
+ lock_guard<mutex> lock(mu);
+ done = true;
+ queue_added.notify_all();
+ }
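+ // Wait for the workers, then pick up any results they still have pending.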
+ for (unsigned i = 0; i < num_threads; ++i) {
+ threads[i].t.join();
+ deliver_results(&threads[i], &serializer);
+ }