/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
29 #ifndef __TBB_reader_writer_lock_H
30 #define __TBB_reader_writer_lock_H
32 #include "tbb_thread.h"
33 #include "tbb_allocator.h"
37 namespace interface5 {
38 //! Writer-preference reader-writer lock with local-only spinning on readers.
39 /** Loosely adapted from Mellor-Crummey and Scott pseudocode at
40 http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp
41 @ingroup synchronization */
42 class reader_writer_lock : tbb::internal::no_copy {
44 friend class scoped_lock;
45 friend class scoped_lock_read;
46 //! Status type for nodes associated with lock instances
47 /** waiting_nonblocking: the wait state for nonblocking lock
48 instances; for writes, these transition straight to active
49 states; for reads, these are unused.
51 waiting: the start and spin state for all lock instances; these will
52 transition to active state when appropriate. Non-blocking write locks
53 transition from this state to waiting_nonblocking immediately.
55 active: the active state means that the lock instance holds
56 the lock; it will transition to invalid state during node deletion
58 invalid: the end state for all nodes; this is set in the
59 destructor so if we encounter this state, we are looking at
60 memory that has already been freed
62 The state diagrams below describe the status transitions.
63 Single arrows indicate that the thread that owns the node is
64 responsible for the transition; double arrows indicate that
65 any thread could make the transition.
67 State diagram for scoped_lock status:
69 waiting ----------> waiting_nonblocking
72 active -----------------> invalid
74 State diagram for scoped_lock_read status:
79 active ----------------->invalid
82 enum status_t { waiting_nonblocking, waiting, active, invalid };
84 //! Constructs a new reader_writer_lock
85 reader_writer_lock() {
89 //! Destructs a reader_writer_lock object
90 ~reader_writer_lock() {
94 //! The scoped lock pattern for write locks
95 /** Scoped locks help avoid the common problem of forgetting to release the lock.
96 This type also serves as the node for queuing locks. */
97 class scoped_lock : tbb::internal::no_copy {
99 friend class reader_writer_lock;
101 //! Construct with blocking attempt to acquire write lock on the passed-in lock
102 scoped_lock(reader_writer_lock& lock) {
103 internal_construct(lock);
106 //! Destructor, releases the write lock
111 void* operator new(size_t s) {
112 return tbb::internal::allocate_via_handler_v3(s);
114 void operator delete(void* p) {
115 tbb::internal::deallocate_via_handler_v3(p);
119 //! The pointer to the mutex to lock
120 reader_writer_lock *mutex;
121 //! The next queued competitor for the mutex
123 //! Status flag of the thread associated with this node
124 atomic<status_t> status;
126 //! Construct scoped_lock that is not holding lock
129 void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
130 void __TBB_EXPORTED_METHOD internal_destroy();
133 //! The scoped lock pattern for read locks
134 class scoped_lock_read : tbb::internal::no_copy {
136 friend class reader_writer_lock;
138 //! Construct with blocking attempt to acquire read lock on the passed-in lock
139 scoped_lock_read(reader_writer_lock& lock) {
140 internal_construct(lock);
143 //! Destructor, releases the read lock
144 ~scoped_lock_read() {
148 void* operator new(size_t s) {
149 return tbb::internal::allocate_via_handler_v3(s);
151 void operator delete(void* p) {
152 tbb::internal::deallocate_via_handler_v3(p);
156 //! The pointer to the mutex to lock
157 reader_writer_lock *mutex;
158 //! The next queued competitor for the mutex
159 scoped_lock_read *next;
160 //! Status flag of the thread associated with this node
161 atomic<status_t> status;
163 //! Construct scoped_lock_read that is not holding lock
166 void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
167 void __TBB_EXPORTED_METHOD internal_destroy();
170 //! Acquires the reader_writer_lock for write.
171 /** If the lock is currently held in write mode by another
172 context, the writer will block by spinning on a local
173 variable. Exceptions thrown: improper_lock The context tries
174 to acquire a reader_writer_lock that it already has write
176 void __TBB_EXPORTED_METHOD lock();
178 //! Tries to acquire the reader_writer_lock for write.
179 /** This function does not block. Return Value: True or false,
180 depending on whether the lock is acquired or not. If the lock
181 is already held by this acquiring context, try_lock() returns
183 bool __TBB_EXPORTED_METHOD try_lock();
185 //! Acquires the reader_writer_lock for read.
186 /** If the lock is currently held by a writer, this reader will
187 block and wait until the writers are done. Exceptions thrown:
188 improper_lock The context tries to acquire a
189 reader_writer_lock that it already has write ownership of. */
190 void __TBB_EXPORTED_METHOD lock_read();
192 //! Tries to acquire the reader_writer_lock for read.
193 /** This function does not block. Return Value: True or false,
194 depending on whether the lock is acquired or not. */
195 bool __TBB_EXPORTED_METHOD try_lock_read();
197 //! Releases the reader_writer_lock
198 void __TBB_EXPORTED_METHOD unlock();
201 void __TBB_EXPORTED_METHOD internal_construct();
202 void __TBB_EXPORTED_METHOD internal_destroy();
204 //! Attempts to acquire write lock
205 /** If unavailable, spins in blocking case, returns false in non-blocking case. */
206 bool start_write(scoped_lock *);
207 //! Sets writer_head to w and attempts to unblock
208 void set_next_writer(scoped_lock *w);
209 //! Relinquishes write lock to next waiting writer or group of readers
210 void end_write(scoped_lock *);
211 //! Checks if current thread holds write lock
212 bool is_current_writer();
214 //! Attempts to acquire read lock
215 /** If unavailable, spins in blocking case, returns false in non-blocking case. */
216 void start_read(scoped_lock_read *);
217 //! Unblocks pending readers
218 void unblock_readers();
219 //! Relinquishes read lock by decrementing counter; last reader wakes pending writer
222 //! The list of pending readers
223 atomic<scoped_lock_read*> reader_head;
224 //! The list of pending writers
225 atomic<scoped_lock*> writer_head;
226 //! The last node in the list of pending writers
227 atomic<scoped_lock*> writer_tail;
228 //! Writer that owns the mutex; tbb_thread::id() otherwise.
229 tbb_thread::id my_current_writer;
231 atomic<unsigned> rdr_count_and_flags;
234 } // namespace interface5
236 using interface5::reader_writer_lock;
240 #endif /* __TBB_reader_writer_lock_H */