
Contents of /linuxsampler/trunk/src/common/SynchronizedConfig.h



Revision 2453
Mon May 13 19:11:08 2013 UTC by schoenebeck
File MIME type: text/x-c++hdr
File size: 15000 byte(s)
* Added DoubleBuffer class, built on top of SynchronizedConfig, aiming to
  reduce the amount of code for protecting shared data. 

/***************************************************************************
 *                                                                         *
 *   Copyright (C) 2006-2012 Andreas Persson                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the Free Software           *
 *   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,                *
 *   MA 02110-1301 USA                                                     *
 ***************************************************************************/

#ifndef SYNCHRONIZEDCONFIG_H
#define SYNCHRONIZEDCONFIG_H

#include <set>
#include <unistd.h>
#include "lsatomic.h"
#include "Mutex.h"

namespace LinuxSampler {

    /**
     * Thread-safe management of configuration data, where the data is
     * updated by a single non real time thread and read by a number
     * of real time threads.
     *
     * The synchronization is achieved by using two instances of the
     * configuration data. The non real time thread gets access to the
     * instance not currently in use by the real time threads by
     * calling GetConfigForUpdate(). After the data is updated, the
     * non real time thread must call SwitchConfig() and redo the
     * update on the other instance. SwitchConfig() blocks until it is
     * safe to modify the other instance.
     *
     * The real time threads need one Reader object each to access the
     * configuration data. This object must be created outside the
     * real time thread. The Lock() function returns a reference to
     * the data to be read, and Unlock() must be called when finished
     * reading the data. (Neither Lock nor Unlock will block the real
     * time thread, or use any system calls.)
     *
     * Note that the non real time side isn't safe for concurrent
     * access, so if there are multiple non real time threads that
     * update the configuration data, a mutex has to be used.
     * (A brief usage sketch can be found after the member function
     * definitions below.)
     *
     * Implementation note: the memory order parameters and fences are
     * very carefully chosen to make the code fast but still safe for
     * memory access reordering made by the CPU.
     */
    template<class T>
    class SynchronizedConfig {
        public:
            SynchronizedConfig();

            // methods for the real time thread

            class Reader {
                public:
                    /**
                     * Gets the configuration object for use by the
                     * real time thread. The object is safe to use
                     * (read only) until Unlock() is called.
                     *
                     * @returns a reference to the configuration
                     *          object to be read by the real time
                     *          thread
                     */
                    /*const*/ T& Lock() { //TODO const currently commented for the DoubleBuffer usage below
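                        // Dekker-style handshake with SwitchConfig(): publish a new,
                        // unique lock value, then issue a full fence before loading
                        // the index. The matching fence in SwitchConfig() guarantees
                        // that the writer either observes this lock value (and waits
                        // until Unlock()), or this reader observes the new index.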
                        lock.store(lockCount += 2, memory_order_relaxed);
                        atomic_thread_fence(memory_order_seq_cst);
                        return parent->config[parent->indexAtomic.load(
                                   memory_order_acquire)];
                    }

                    /**
                     * Unlock the configuration object. Unlock() must
                     * be called by the real time thread after it has
                     * finished reading the configuration object. If
                     * the non real time thread is waiting in
                     * SwitchConfig() it will be awakened when no real
                     * time thread holds a lock anymore.
                     */
                    void Unlock() {
                        lock.store(0, memory_order_release);
                    }

                    Reader(SynchronizedConfig& config);
                    Reader(SynchronizedConfig* config);
                    virtual ~Reader();
                private:
                    friend class SynchronizedConfig;
                    SynchronizedConfig* parent;
                    int lockCount;    // increased in every Lock(),
                                      // lowest bit is always set.
                    atomic<int> lock; // equals lockCount when inside
                                      // critical region, otherwise 0
                    Reader* next;     // only used locally in SwitchConfig
                    int prevLock;     // only used locally in SwitchConfig
            };


            // methods for the non real time thread

            /**
             * Gets the configuration object for use by the non real
             * time thread. The object returned is not in use by the
             * real time thread, so it can safely be updated. After
             * the update is done, the non real time thread must call
             * SwitchConfig() and the same update must be done again.
             *
             * @returns a reference to the configuration object to be
             *          updated by the non real time thread
             */
            T& GetConfigForUpdate();

            /**
             * Atomically switch the newly updated configuration
             * object with the one used by the real time thread, then
             * wait for the real time thread to finish working with
             * the old object before returning the old object.
             * SwitchConfig() must be called by the non real time
             * thread after an update has been done, and the object
             * returned must be updated in the same way as the first.
             *
             * @returns a reference to the configuration object to be
             *          updated by the non real time thread
             */
            T& SwitchConfig();

        private:
            atomic<int> indexAtomic;
            int updateIndex;
            T config[2];
            std::set<Reader*> readers;
    };

    template<class T> SynchronizedConfig<T>::SynchronizedConfig() :
        indexAtomic(0) {
        updateIndex = 1;
    }

    template<class T> T& SynchronizedConfig<T>::GetConfigForUpdate() {
        return config[updateIndex];
    }

    template<class T> T& SynchronizedConfig<T>::SwitchConfig() {
        indexAtomic.store(updateIndex, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);

        // first put all locking readers in a linked list
        Reader* lockingReaders = 0;
        for (typename std::set<Reader*>::iterator iter = readers.begin() ;
             iter != readers.end() ;
             iter++) {
            (*iter)->prevLock = (*iter)->lock.load(memory_order_acquire);
            if ((*iter)->prevLock) {
                (*iter)->next = lockingReaders;
                lockingReaders = *iter;
            }
        }

        // wait until there are no locking readers left
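        // (a reader is unlinked as soon as its lock word differs from the
        // value sampled above: it has either called Unlock() or entered a
        // new critical section, and a critical section entered after the
        // index switch above already reads the new instance)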
        while (lockingReaders) {
            usleep(50000);
            Reader** prev = &lockingReaders;
            for (Reader* p = lockingReaders ; p ; p = p->next) {
                if (p->lock.load(memory_order_acquire) == p->prevLock) {
                    prev = &p->next;
                } else {
                    *prev = p->next; // unlink
                }
            }
        }

        updateIndex ^= 1;
        return config[updateIndex];
    }


    // ----- Reader ----

    template <class T>
    SynchronizedConfig<T>::Reader::Reader(SynchronizedConfig& config) :
        parent(&config), lock(0), lockCount(1) {
        parent->readers.insert(this);
    }

    template <class T>
    SynchronizedConfig<T>::Reader::Reader(SynchronizedConfig* config) :
        parent(config), lock(0), lockCount(1) {
        parent->readers.insert(this);
    }

    template <class T>
    SynchronizedConfig<T>::Reader::~Reader() {
        parent->readers.erase(this);
    }

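    // A minimal usage sketch of SynchronizedConfig (the type and variable
    // names are only illustrative, they are not part of this header):
    //
    //     SynchronizedConfig<std::set<int> > config;
    //     SynchronizedConfig<std::set<int> >::Reader reader(config);
    //
    //     // non real time thread: apply every update twice, once on each
    //     // instance, with SwitchConfig() in between
    //     config.GetConfigForUpdate().insert(42);
    //     config.SwitchConfig().insert(42);
    //
    //     // real time thread: read-only access between Lock() and Unlock()
    //     const std::set<int>& data = reader.Lock();
    //     bool found = data.count(42);
    //     reader.Unlock();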

    // ----- Convenience classes on top of SynchronizedConfig ----


    /**
     * Base interface class for classes that implement synchronization of data
     * shared between multiple threads.
     */
    template<class T>
    class Synchronizer {
        public:
            /**
             * Signal intention to enter a synchronized code block. Depending
             * on the actual implementation, this call may block the calling
             * thread until it is safe to actually use the protected data. After
             * this call returns, it is safe for the calling thread to access and
             * modify the shared data. As soon as the thread is done with accessing
             * the shared data, it MUST call endSync().
             *
             * @return the shared protected data
             */
            virtual T& beginSync() = 0; //TODO: or call it lock() instead ?

            /**
             * Signal that the synchronized code block has been left. Depending
             * on the actual implementation, this call may block the calling
             * thread for a certain amount of time.
             */
            virtual void endSync() = 0; //TODO: or call it unlock() instead ?
    };

    /**
     * Wraps, as a kind of pointer class, some data object shared with other
     * threads, to protect / synchronize the shared data against
     * non-deterministic concurrent access. It does so by locking the shared
     * data in the Sync constructor and unlocking the shared data in the Sync
     * destructor. Accordingly it can always be considered safe to access the
     * shared data during the whole life time of the Sync object. Due to
     * this design, a Sync object MUST only be accessed and destroyed
     * by the same thread that created it.
     */
    template<class T>
    class Sync {
        public:
            Sync(Synchronizer<T>* syncer) {
                this->syncer = syncer;
                this->data = &syncer->beginSync();
            }

            virtual ~Sync() {
                syncer->endSync();
            }

            Sync& operator =(const Sync& arg) {
                *this->data = *arg.data;
                return *this;
            }

            Sync& operator =(const T& arg) {
                *this->data = arg;
                return *this;
            }

            const T& operator *() const { return *data; }
            T& operator *() { return *data; }

            const T* operator ->() const { return data; }
            T* operator ->() { return data; }

        private:
            Synchronizer<T>* syncer; ///< Points to the object that shall be responsible to protect the shared data.
            T* data; ///< Points to the shared data that should be protected.
    };

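    // A minimal sketch of the RAII pattern provided by Sync, assuming some
    // Synchronizer<int> implementation "sync" (illustrative name only):
    //
    //     {
    //         Sync<int> value(&sync); // calls sync.beginSync()
    //         *value += 1;            // safe access through the Sync object
    //     }                           // leaving the scope calls sync.endSync()
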
    /**
     * BackBuffer object to be accessed by multiple non-real-time threads.
     *
     * Since a back buffer is designed for being accessed by non-real-time
     * threads, its methods may block the calling thread for a long time.
     */
    template<class T>
    class BackBuffer : public SynchronizedConfig<T>, public Synchronizer<T> {
        public:
            virtual T& beginSync() OVERRIDE {
                mutex.Lock();
                data = &SynchronizedConfig<T>::GetConfigForUpdate();
                return *data;
            }

            virtual void endSync() OVERRIDE {
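                // copy the modified instance over the instance just released
                // by the real-time side, so that both copies stay identical
                // (this is the "redo the update" step required by SwitchConfig())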
                const T clone = *data;
                SynchronizedConfig<T>::SwitchConfig() = clone;
                mutex.Unlock();
            }

        private:
            T* data;
            Mutex mutex;
    };

    /**
     * FrontBuffer object to be accessed by exactly ONE real-time thread.
     * A front buffer is designed for real-time access: its methods are
     * lock free, that is, none of them blocks the calling thread for a
     * long time.
     *
     * If you need the front buffer's data to be accessed by multiple real-time
     * threads instead, then you need to create a separate FrontBuffer instance
     * for each of those real-time threads. They would all point to the same
     * data, while ensuring protection against concurrent access among those
     * real-time threads.
     */
    template<class T>
    class FrontBuffer : public SynchronizedConfig<T>::Reader, public Synchronizer<T> {
        public:
            FrontBuffer(BackBuffer<T>& backBuffer) : SynchronizedConfig<T>::Reader::Reader(&backBuffer) {}
            virtual T& beginSync() OVERRIDE { return SynchronizedConfig<T>::Reader::Lock(); }
            virtual void endSync() OVERRIDE { SynchronizedConfig<T>::Reader::Unlock(); }
    };

    /**
     * Synchronization / protection of data shared between multiple threads by
     * using a double buffer design. The FrontBuffer is meant to be accessed by
     * exactly one real-time thread, whereas the BackBuffer is meant to be
     * accessed by multiple non-real-time threads.
     *
     * This class is built on top of SynchronizedConfig as a convenient API to
     * reduce the amount of code required to protect shared data.
     */
    template<class T>
    class DoubleBuffer {
        public:
            DoubleBuffer() : m_front(m_back) {}

            /**
             * Synchronized access of the shared data for EXACTLY one real-time
             * thread.
             *
             * The returned shared data is wrapped into a Sync object, which
             * ensures that the shared data is protected against concurrent access
             * during the life time of the returned Sync object.
             */
            inline
            Sync<T> front() { return Sync<T>(&m_front); }

            /**
             * Synchronized access of the shared data for multiple non-real-time
             * threads.
             *
             * The returned shared data is wrapped into a Sync object, which
             * ensures that the shared data is protected against concurrent access
             * during the life time of the returned Sync object.
             *
             * As soon as the returned Sync object is destroyed, the FrontBuffer
             * will automatically be exchanged with the modified BackBuffer.
             */
            inline
            Sync<T> back() { return Sync<T>(&m_back); }

        private:
            BackBuffer<T>  m_back;  ///< Back buffer (non real-time thread(s) side).
            FrontBuffer<T> m_front; ///< Front buffer (real-time thread side).
    };

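    // A minimal usage sketch of DoubleBuffer (type and variable names are
    // only illustrative, they are not part of this header):
    //
    //     DoubleBuffer<std::set<int> > db;
    //
    //     // non-real-time thread: modify through the back buffer; the front
    //     // buffer is switched when the temporary Sync object is destroyed
    //     db.back()->insert(42);
    //
    //     // real-time thread: read through the front buffer
    //     bool found = db.front()->count(42);
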
} // namespace LinuxSampler

#endif
