
Contents of /linuxsampler/trunk/src/common/RingBuffer.h



Revision 2384 - Fri Dec 14 16:04:49 2012 UTC by schoenebeck
File MIME type: text/x-c++hdr
File size: 21370 byte(s)
* Fixed variable underflow in VirtualMidiDevice, which caused graphical
  virtual keyboards in frontends / instrument editors being stuck.
* VirtualKeyboard: allow to dynamically adjust max. MIDI events.
* RingBuffer: added resize() method

/***************************************************************************
 *                                                                         *
 *   LinuxSampler - modular, streaming capable sampler                     *
 *                                                                         *
 *   Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck   *
 *   Copyright (C) 2005 - 2012 Christian Schoenebeck                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the Free Software           *
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston,                 *
 *   MA 02111-1307 USA                                                     *
 ***************************************************************************/

#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#define DEFAULT_WRAP_ELEMENTS 0

#include <string.h>

#include "lsatomic.h"

using LinuxSampler::atomic;
using LinuxSampler::memory_order_relaxed;
using LinuxSampler::memory_order_acquire;
using LinuxSampler::memory_order_release;


/** @brief Real-time safe and type safe RingBuffer implementation.
 *
 * This constant size buffer can be used to send data from exactly one
 * sender / writing thread to exactly one receiver / reading thread. It is
 * real-time safe because all data is allocated when this RingBuffer is
 * created, and no system level mechanisms are used to ensure thread
 * safety of this class.
 *
 * <b>Important:</b> there are two distinct behaviors of this RingBuffer,
 * selected by the boolean template argument @c T_DEEP_COPY:
 *
 * - @c true: The RingBuffer will copy elements of type @c T by using type
 *   @c T's assignment operator. This behavior is mandatory for all data
 *   structures (classes) which additionally allocate memory on the heap.
 *   Type @c T needs to provide an assignment operator implementation
 *   though, otherwise this will cause a compilation error. This behavior
 *   is safer, but usually slower (except for very small buffer sizes,
 *   where it might even be faster).
 * - @c false: The RingBuffer will copy elements of type @c T by flatly
 *   copying their structural data (i.e. with @c memcpy()) in one piece.
 *   This will only work if class @c T (and all of its subelements) does
 *   not allocate any additional data on the heap by itself. So use this
 *   option with great care, because otherwise it will result in very ugly
 *   behavior and crashes! For larger buffer sizes, this behavior will
 *   most probably be faster.
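 *
 * A minimal usage sketch (the element type @c Event and the thread roles
 * shown are hypothetical, not part of this file):
 * @code
 * RingBuffer<Event,true> events(512); // one writer thread, one reader thread
 *
 * // writer thread:
 * Event in;
 * events.push(&in);       // returns 1 on success, 0 if the buffer is full
 *
 * // reader thread:
 * Event out;
 * if (events.pop(&out)) { // returns 1 on success, 0 if the buffer is empty
 *     // ... process 'out' ...
 * }
 * @endcode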
 */
template<class T, bool T_DEEP_COPY>
class RingBuffer
{
public:
    RingBuffer (int sz, int wrap_elements = DEFAULT_WRAP_ELEMENTS) :
        write_ptr(0), read_ptr(0)
    {
        _allocBuffer(sz, wrap_elements);
    }

    /**
     * Resize this ring buffer to the given size. This operation
     * is not thread safe! Any operations using this RingBuffer
     * have to be stopped before calling this method.
     *
     * @param sz - new size (amount of elements)
     * @param wrap_elements - (optional) new amount of wrap elements to
     *                        be used beyond the official buffer end; if
     *                        not provided, the current amount of wrap
     *                        elements is retained
     */
    void resize(int sz, int wrap_elements = -1) {
        if (wrap_elements == -1)
            wrap_elements = this->wrap_elements;

        delete [] buf;

        _allocBuffer(sz, wrap_elements);
    }

    virtual ~RingBuffer() {
        delete [] buf;
    }

    /**
     * Sets all remaining write space elements to zero. The write pointer
     * is currently not advanced afterwards, but that might change in the
     * future.
     *
     * @e Caution: for @c T_DEEP_COPY=true you probably do @e NOT want
     * to call this method at all, at least not if type @c T allocates
     * any additional data on the heap by itself.
     */
    inline void fill_write_space_with_null() {
        int w = write_ptr.load(memory_order_relaxed),
            r = read_ptr.load(memory_order_acquire);
        memset(get_write_ptr(), 0, sizeof(T)*write_space_to_end());
        if (r && w >= r) {
            memset(get_buffer_begin(), 0, sizeof(T)*(r - 1));
        }

        // set the wrap space elements to null
        if (wrap_elements) memset(&buf[size], 0, sizeof(T)*wrap_elements);
    }

    __inline int read (T *dest, int cnt);
    __inline int write (T *src, int cnt);

    inline int push(T* src) { return write(src,1); }
    inline int pop(T* dst)  { return read(dst,1);  }

    __inline T *get_buffer_begin();

    __inline T *get_read_ptr(void) {
        return(&buf[read_ptr.load(memory_order_relaxed)]);
    }

    /**
     * Returns a pointer to the element from the current read position,
     * advanced by \a offset elements.
     */
    /*inline T* get_read_ptr(int offset) {
        int r = read_ptr.load(memory_order_relaxed);
        r += offset;
        r &= size_mask;
        return &buf[r];
    }*/

    __inline T *get_write_ptr();
    __inline void increment_read_ptr(int cnt) {
        read_ptr.store((read_ptr.load(memory_order_relaxed) + cnt) & size_mask, memory_order_release);
    }
    __inline void set_read_ptr(int val) {
        read_ptr.store(val, memory_order_release);
    }

    __inline void increment_write_ptr(int cnt) {
        write_ptr.store((write_ptr.load(memory_order_relaxed) + cnt) & size_mask, memory_order_release);
    }

    /* This function increments write_ptr by cnt; if the buffer wraps, size
       is subtracted from the write_ptr value so that it stays within
       0 <= write_ptr < size. Use this function to increment the write
       pointer after you filled the buffer with the number of elements
       returned by write_space_to_end_with_wrap(). This ensures that the
       data written to the buffer fills up all the wrap space that resides
       past the regular buffer end. The wrap space is needed for
       interpolation, so that the audio thread sees the ring buffer as a
       linear space, which allows us to use faster routines. When the
       buffer wraps, the wrap part is memcpy()ed to the beginning of the
       buffer and the write pointer incremented accordingly. (See the
       usage sketch below write_space_to_end_with_wrap() further down.)
    */
    __inline void increment_write_ptr_with_wrap(int cnt) {
        int w = write_ptr.load(memory_order_relaxed);
        w += cnt;
        if(w >= size) {
            w -= size;
            copy(&buf[0], &buf[size], w);
            //printf("DEBUG !!!! increment_write_ptr_with_wrap: buffer wrapped, elements wrapped = %d (wrap_elements %d)\n",w,wrap_elements);
        }
        write_ptr.store(w, memory_order_release);
    }

    /* This function returns the available write space in the buffer. When
       read_ptr > write_ptr it returns the space in between; when
       read_ptr < write_ptr it returns the space between write_ptr and the
       buffer end, including the wrap space. There is one exception: when
       read_ptr <= wrap_elements. In that case we return the write space
       to the buffer end minus one, without the wrap_elements. This is
       needed because a subsequent increment_write_ptr_with_wrap(), which
       copies the data residing in the wrap space to the beginning of the
       buffer and increments the write_ptr, would otherwise cause the
       write_ptr to overstep the read_ptr, which would be an error. So
       basically, if (r <= wrap_elements) we return the buffer space to
       the end minus one, which ensures that at the next call there will
       be one element free to write before the buffer wraps, and usually
       (except in EOF situations) the next write_space_to_end_with_wrap()
       will return 1 + wrap_elements, which ensures that the wrap part
       gets fully filled with data.
    */
    __inline int write_space_to_end_with_wrap() {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);
        //printf("write_space_to_end: w=%d r=%d\n",w,r);
        if(r > w) {
            //printf("DEBUG: write_space_to_end_with_wrap: r>w r=%d w=%d val=%d\n",r,w,r - w - 1);
            return(r - w - 1);
        }
        if(r <= wrap_elements) {
            //printf("DEBUG: write_space_to_end_with_wrap: ATTENTION r <= wrap_elements: r=%d w=%d val=%d\n",r,w,size - w - 1);
            return(size - w - 1);
        }
        if(r) {
            //printf("DEBUG: write_space_to_end_with_wrap: r=%d w=%d val=%d\n",r,w,size - w + wrap_elements);
            return(size - w + wrap_elements);
        }
        //printf("DEBUG: write_space_to_end_with_wrap: r=0 w=%d val=%d\n",w,size - w - 1 + wrap_elements);
        return(size - w - 1 + wrap_elements);
    }
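
    /* A sketch (not part of the original file) of the producer pattern
       that write_space_to_end_with_wrap() and
       increment_write_ptr_with_wrap() are designed for; rb, pStream and
       readSampleDataFromDisk() are hypothetical placeholders:

           int cnt = rb.write_space_to_end_with_wrap();
           if (cnt) {
               // fill the buffer linearly, possibly past the official end
               // into the wrap space ...
               int n = readSampleDataFromDisk(pStream, rb.get_write_ptr(), cnt);
               // ... then let the buffer copy a wrapped part back to the
               // beginning and advance the write pointer accordingly
               rb.increment_write_ptr_with_wrap(n);
           }
    */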

    /* This function adjusts the number of elements to write into the ring
       buffer in a way that the size boundary is avoided and that the wrap
       space always gets entirely filled. cnt contains the
       write_space_to_end_with_wrap() amount, while capped_cnt contains a
       capped amount of samples to read. Normally capped_cnt == cnt, but
       in some cases, e.g. when the disk thread needs to refill tracks
       with smaller blocks because lots of streams require immediate
       refill (because lots of notes were started simultaneously), we set
       capped_cnt to a fixed amount (< cnt, e.g. 64k), which helps to
       reduce the buffer refill latencies that occur between streams. The
       first if() checks whether the current write_ptr + capped_cnt
       resides within the wrap area but is < size + wrap_elements; in that
       case we cannot return capped_cnt, because it would lead to a
       write_ptr wrap with only a partial fill of the wrap space, which
       would lead to errors. So we simply return cnt, which ensures that
       the entire wrap space will get filled correctly. In all other cases
       (which are not problematic because no write_ptr wrapping occurs) we
       simply return capped_cnt.
    */
    __inline int adjust_write_space_to_avoid_boundary(int cnt, int capped_cnt) {
        int w;
        w = write_ptr.load(memory_order_relaxed);
        if((w+capped_cnt) >= size && (w+capped_cnt) < (size+wrap_elements)) {
            //printf("adjust_write_space_to_avoid_boundary returning cnt = %d\n",cnt);
            return(cnt);
        }
        //printf("adjust_write_space_to_avoid_boundary returning capped_cnt = %d\n",capped_cnt);
        return(capped_cnt);
    }
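
    /* A sketch (not part of the original file) of the capped refill case
       described above; rb is a RingBuffer and MAX_REFILL a hypothetical
       cap (e.g. 64k elements):

           int cnt    = rb.write_space_to_end_with_wrap();
           int capped = cnt > MAX_REFILL ? MAX_REFILL : cnt;
           capped     = rb.adjust_write_space_to_avoid_boundary(cnt, capped);
           // fill 'capped' elements at rb.get_write_ptr(), then:
           rb.increment_write_ptr_with_wrap(capped);
    */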

    __inline int write_space_to_end() {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);
        //printf("write_space_to_end: w=%d r=%d\n",w,r);
        if(r > w) return(r - w - 1);
        if(r) return(size - w);
        return(size - w - 1);
    }

    __inline int read_space_to_end() {
        int w, r;

        w = write_ptr.load(memory_order_acquire);
        r = read_ptr.load(memory_order_relaxed);
        if(w >= r) return(w - r);
        return(size - r);
    }

    __inline void init() {
        write_ptr.store(0, memory_order_relaxed);
        read_ptr.store(0, memory_order_relaxed);
        // wrap=0;
    }

    int write_space () {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);

        if (w > r) {
            return ((r - w + size) & size_mask) - 1;
        } else if (w < r) {
            return (r - w) - 1;
        } else {
            return size - 1;
        }
    }

    int read_space () {
        int w, r;

        w = write_ptr.load(memory_order_acquire);
        r = read_ptr.load(memory_order_relaxed);

        if (w >= r) {
            return w - r;
        } else {
            return (w - r + size) & size_mask;
        }
    }

    int size;
    int wrap_elements;

    /**
     * Independent, random access reading from a RingBuffer. This class
     * allows reading from a RingBuffer without being forced to free the
     * read data while reading / positioning.
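     *
     * A minimal sketch (the element type @c Event, the buffer @c events
     * and inspect() are hypothetical, not part of this file):
     * @code
     * RingBuffer<Event,true>::NonVolatileReader reader =
     *     events.get_non_volatile_reader();
     * Event e;
     * while (reader.read(&e, 1)) inspect(e); // buffer data stays untouched
     * reader.free(); // only now the read data is freed in the RingBuffer
     * @endcode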
     */
    template<class T1, bool T1_DEEP_COPY>
    class _NonVolatileReader {
    public:
        int read_space() {
            int r = read_ptr;
            int w = pBuf->write_ptr.load(memory_order_acquire);
            return (w >= r) ? w - r : (w - r + pBuf->size) & pBuf->size_mask;
        }

        /**
         * Prefix decrement operator, for reducing NonVolatileReader's
         * read position by one.
         */
        inline void operator--() {
            if (read_ptr == pBuf->read_ptr.load(memory_order_relaxed)) return; //TODO: or should we react on this case (e.g. force segfault), as this is a very odd case?
            read_ptr = (read_ptr-1) & pBuf->size_mask;
        }

        /**
         * Postfix decrement operator, for reducing NonVolatileReader's
         * read position by one.
         */
        inline void operator--(int) {
            --*this;
        }

        /**
         * Returns a pointer to the RingBuffer data at the
         * NonVolatileReader's current read position and advances the
         * NonVolatileReader's read position by one.
         *
         * @returns pointer to element at current read position, or NULL
         *          if there is nothing to read
         */
        T* pop() {
            if (!read_space()) return NULL;
            T* pData = &pBuf->buf[read_ptr];
            read_ptr++;
            read_ptr &= pBuf->size_mask;
            return pData;
        }

        /**
         * Reads one element from the NonVolatileReader's current read
         * position, copies it to the variable pointed to by \a dst and
         * finally advances the NonVolatileReader's read position by
         * one.
         *
         * @param dst - where the element is copied to
         * @returns 1 on success, 0 otherwise
         */
        int pop(T* dst) { return read(dst,1); }

        /**
         * Reads \a cnt elements from the NonVolatileReader's current
         * read position, copies them to the buffer pointed to by
         * \a dest and finally advances the NonVolatileReader's read
         * position by the number of elements read.
         *
         * @param dest - destination buffer
         * @param cnt - number of elements to read
         * @returns number of elements read
         */
        int read(T* dest, int cnt) {
            int free_cnt;
            int cnt2;
            int to_read;
            int n1, n2;
            int priv_read_ptr;

            priv_read_ptr = read_ptr;

            if ((free_cnt = read_space()) == 0) return 0;

            to_read = cnt > free_cnt ? free_cnt : cnt;

            cnt2 = priv_read_ptr + to_read;

            if (cnt2 > pBuf->size) {
                n1 = pBuf->size - priv_read_ptr;
                n2 = cnt2 & pBuf->size_mask;
            } else {
                n1 = to_read;
                n2 = 0;
            }

            copy(dest, &pBuf->buf[priv_read_ptr], n1);
            priv_read_ptr = (priv_read_ptr + n1) & pBuf->size_mask;

            if (n2) {
                copy(dest+n1, pBuf->buf, n2);
                priv_read_ptr = n2;
            }

            this->read_ptr = priv_read_ptr;
            return to_read;
        }

        /**
         * When the read data is finally not needed anymore, this method
         * should be called to free the data in the RingBuffer up to the
         * current read position of this NonVolatileReader.
         *
         * @see RingBuffer::increment_read_ptr()
         */
        void free() {
            pBuf->read_ptr.store(read_ptr, memory_order_release);
        }

    protected:
        _NonVolatileReader(RingBuffer<T1,T1_DEEP_COPY>* pBuf) {
            this->pBuf = pBuf;
            this->read_ptr = pBuf->read_ptr.load(memory_order_relaxed);
        }

        RingBuffer<T1,T1_DEEP_COPY>* pBuf;
        int read_ptr;

        friend class RingBuffer<T1,T1_DEEP_COPY>;
    };

    typedef _NonVolatileReader<T,T_DEEP_COPY> NonVolatileReader;

    NonVolatileReader get_non_volatile_reader() { return NonVolatileReader(this); }

protected:
    T *buf;
    atomic<int> write_ptr;
    atomic<int> read_ptr;
    int size_mask;

    /**
     * Copies \a n elements from the buffer pointed to by \a pSrc to the
     * buffer pointed to by \a pDst.
     */
    inline static void copy(T* pDst, T* pSrc, int n);

    void _allocBuffer(int sz, int wrap_elements) {
        this->wrap_elements = wrap_elements;

        // the write-with-wrap functions need wrap_elements extra
        // space in the buffer to be able to copy the wrap space
        sz += wrap_elements;

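        // round the requested size up to the next power of two, so that
        // read / write positions can be wrapped with a cheap bit mask
        // (e.g. sz = 1000 yields size = 1024 and size_mask = 1023)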
        int power_of_two;
        for (power_of_two = 1;
             1<<power_of_two < sz;
             power_of_two++);

        size = 1<<power_of_two;
        size_mask = size;
        size_mask -= 1;
        buf = new T[size + wrap_elements];
    }

    friend class _NonVolatileReader<T,T_DEEP_COPY>;
};

template<class T, bool T_DEEP_COPY>
T* RingBuffer<T,T_DEEP_COPY>::get_write_ptr (void) {
    return(&buf[write_ptr.load(memory_order_relaxed)]);
}

template<class T, bool T_DEEP_COPY>
T* RingBuffer<T,T_DEEP_COPY>::get_buffer_begin (void) {
    return(buf);
}


template<class T, bool T_DEEP_COPY>
int RingBuffer<T,T_DEEP_COPY>::read(T* dest, int cnt)
{
    int free_cnt;
    int cnt2;
    int to_read;
    int n1, n2;
    int priv_read_ptr;

    priv_read_ptr = read_ptr.load(memory_order_relaxed);

    if ((free_cnt = read_space ()) == 0) {
        return 0;
    }

    to_read = cnt > free_cnt ? free_cnt : cnt;

    cnt2 = priv_read_ptr + to_read;

    if (cnt2 > size) {
        n1 = size - priv_read_ptr;
        n2 = cnt2 & size_mask;
    } else {
        n1 = to_read;
        n2 = 0;
    }

    copy(dest, &buf[priv_read_ptr], n1);
    priv_read_ptr = (priv_read_ptr + n1) & size_mask;

    if (n2) {
        copy(dest+n1, buf, n2);
        priv_read_ptr = n2;
    }

    read_ptr.store(priv_read_ptr, memory_order_release);
    return to_read;
}

template<class T, bool T_DEEP_COPY>
int RingBuffer<T,T_DEEP_COPY>::write(T* src, int cnt)
{
    int free_cnt;
    int cnt2;
    int to_write;
    int n1, n2;
    int priv_write_ptr;

    priv_write_ptr = write_ptr.load(memory_order_relaxed);

    if ((free_cnt = write_space ()) == 0) {
        return 0;
    }

    to_write = cnt > free_cnt ? free_cnt : cnt;

    cnt2 = priv_write_ptr + to_write;

    if (cnt2 > size) {
        n1 = size - priv_write_ptr;
        n2 = cnt2 & size_mask;
    } else {
        n1 = to_write;
        n2 = 0;
    }

    copy(&buf[priv_write_ptr], src, n1);
    priv_write_ptr = (priv_write_ptr + n1) & size_mask;

    if (n2) {
        copy(buf, src+n1, n2);
        priv_write_ptr = n2;
    }
    write_ptr.store(priv_write_ptr, memory_order_release);
    return to_write;
}

template<class T, bool T_DEEP_COPY>
void RingBuffer<T,T_DEEP_COPY>::copy(T* pDst, T* pSrc, int n) {
    if (T_DEEP_COPY) { // deep copy - won't work for data structures without assignment operator implementation
        for (int i = 0; i < n; i++) pDst[i] = pSrc[i];
    } else { // flat copy - won't work for complex data structures!
        memcpy(pDst, pSrc, n * sizeof(T));
    }
}

#endif /* RINGBUFFER_H */
