+ // Number of documents matched so far, summed across all worker threads.
+ atomic<uint64_t> matched{ 0 };
+
+ mutex mu;
+ condition_variable queue_added, queue_removed;
+ // NOTE(review): elements are unpacked into uint32_t locals below; the
+ // int vs. uint32_t mismatch is benign for docids < 2^31 but worth unifying.
+ deque<tuple<int, int, string>> work_queue; // Under mu.
+ bool done = false; // Under mu.
+
+ // One worker per online core, minus one (presumably leaving a core for the
+ // producer/I/O thread -- TODO confirm), but never fewer than one worker.
+ unsigned num_threads = max<int>(sysconf(_SC_NPROCESSORS_ONLN) - 1, 1);
+ dprintf("Using %u worker threads for linear scan.\n", num_threads);
+ unique_ptr<WorkerThread[]> threads(new WorkerThread[num_threads]);
+ for (unsigned i = 0; i < num_threads; ++i) {
+ threads[i].t = thread([&threads, &mu, &queue_added, &queue_removed, &work_queue, &done, &offsets, &needles, &access_rx_cache, &matched, i] {
+ // regcomp() takes a lock on the regex, so each thread will need its own.
+ // Thread 0 reuses the caller's compiled needles; every other thread
+ // recompiles private copies of them.
+ const vector<Needle> *use_needles = &needles;
+ vector<Needle> recompiled_needles;
+ if (i != 0 && patterns_are_regex) {
+ recompiled_needles = needles;
+ for (Needle &needle : recompiled_needles) {
+ needle.re = compile_regex(needle.str);
+ }
+ use_needles = &recompiled_needles;
+ }
+
+ WorkerThreadReceiver receiver(&threads[i]);
+ for (;;) {
+ // One work item: the docid range [io_docid, last_docid) plus the
+ // compressed data backing those documents.
+ uint32_t io_docid, last_docid;
+ string compressed;
+
+ {
+ // Pop one item, or exit once the producer has signaled completion
+ // and the queue has fully drained.
+ unique_lock<mutex> lock(mu);
+ queue_added.wait(lock, [&work_queue, &done] { return !work_queue.empty() || done; });
+ if (done && work_queue.empty()) {
+ return;
+ }
+ // Move, not copy: steals the potentially large string payload.
+ tie(io_docid, last_docid, compressed) = move(work_queue.front());
+ work_queue.pop_front();
+ // Wake the producer, which presumably blocks on queue_removed when
+ // the queue is full (bounded queue) -- TODO confirm against the
+ // producer side. NOTE(review): notifying while still holding mu can
+ // cause the woken thread to block immediately; notifying after the
+ // scope ends would avoid that (minor).
+ queue_removed.notify_all();
+ }
+
+ // Scan each document in the batch. offsets[] appears to hold absolute
+ // byte offsets, rebased against offsets[io_docid] to index into
+ // "compressed"; it must have a valid entry at index last_docid (one
+ // past the batch) for the final len computation -- TODO confirm.
+ for (uint32_t docid = io_docid; docid < last_docid; ++docid) {
+ size_t relative_offset = offsets[docid] - offsets[io_docid];
+ size_t len = offsets[docid + 1] - offsets[docid];
+ scan_file_block(*use_needles, { &compressed[relative_offset], len }, &access_rx_cache, docid, &receiver, &matched);
+ }
+ }
+ });
+ }
+