/[svn]/linuxsampler/trunk/src/common/RingBuffer.h
ViewVC logotype

Annotation of /linuxsampler/trunk/src/common/RingBuffer.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 970 - (hide annotations) (download) (as text)
Wed Dec 6 22:28:17 2006 UTC (17 years, 4 months ago) by schoenebeck
File MIME type: text/x-c++hdr
File size: 19719 byte(s)
* fixed crash occurring in conjunction with the new 'MAP MIDI_INSTRUMENT'
  LSCP command (cause: RingBuffer was not able to do deep copies)

1 schoenebeck 53 /***************************************************************************
2     * *
3     * LinuxSampler - modular, streaming capable sampler *
4     * *
5 schoenebeck 56 * Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck *
6 schoenebeck 970 * Copyright (C) 2005, 2006 Christian Schoenebeck *
7 schoenebeck 53 * *
8     * This program is free software; you can redistribute it and/or modify *
9     * it under the terms of the GNU General Public License as published by *
10     * the Free Software Foundation; either version 2 of the License, or *
11     * (at your option) any later version. *
12     * *
13     * This program is distributed in the hope that it will be useful, *
14     * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16     * GNU General Public License for more details. *
17     * *
18     * You should have received a copy of the GNU General Public License *
19     * along with this program; if not, write to the Free Software *
20     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
21     * MA 02111-1307 USA *
22     ***************************************************************************/
23    
24     #ifndef RINGBUFFER_H
25     #define RINGBUFFER_H
26    
27     #define DEFAULT_WRAP_ELEMENTS 1024
28    
29     #include <string.h>
30    
31     #include "atomic.h"
32    
33 schoenebeck 970 /** @brief Real-time safe and type safe RingBuffer implementation.
34     *
35     * This constant size buffer can be used to send data from exactly one
36     * sender / writing thread to exactly one receiver / reading thread. It is
37     * real-time safe due to the fact that data is only allocated when this
38     * RingBuffer is created and no system level mechanisms are used for
39     * ensuring thread safety of this class.
40     *
41     * <b>Important:</b> There are two distinct behaviors of this RingBuffer
42     * which has to be given as template argument @c T_DEEP_COPY, which is a
43     * boolean flag:
44     *
45     * - @c true: The RingBuffer will copy elements of type @c T by using type
46     * @c T's assignment operator. This behavior is mandatory for all data
47     * structures (classes) which additionally allocate memory on the heap.
48     * Type @c T's needs to have an assignment operator implementation though,
49     * otherwise this will cause a compilation error. This behavior is more
50     * safe, but usually slower (except for very small buffer sizes, where it
51     * might be even faster).
52     * - @c false: The RingBuffer will copy elements of type @c T by flatly
53     * copying their structural data ( i.e. with @c memcpy() ) in one piece.
54     * This will only work if class @c T (and all of its subelements) does not
55     * allocate any additional data on the heap by itself. So use this option
56     * with great care, because otherwise it will result in very ugly behavior
57     * and crashes! For larger buffer sizes, this behavior will most probably
58     * be faster.
59     */
60     template<class T, bool T_DEEP_COPY>
61 schoenebeck 53 class RingBuffer
62     {
63     public:
64     RingBuffer (int sz, int wrap_elements = DEFAULT_WRAP_ELEMENTS) {
65     int power_of_two;
66    
67     this->wrap_elements = wrap_elements;
68    
69     for (power_of_two = 1;
70     1<<power_of_two < sz;
71     power_of_two++);
72    
73     size = 1<<power_of_two;
74     size_mask = size;
75     size_mask -= 1;
76     atomic_set(&write_ptr, 0);
77     atomic_set(&read_ptr, 0);
78     buf = new T[size + wrap_elements];
79     };
80    
81     virtual ~RingBuffer() {
82     delete [] buf;
83     }
84    
85     /**
86     * Sets all remaining write space elements to zero. The write pointer
87     * will currently not be incremented after, but that might change in
88     * future.
89 schoenebeck 970 *
90     * @e Caution: for @c T_DEEP_COPY=true you might probably @e NOT want
91     * to call this method at all, at least not in case type @c T allocates
92     * any additional data on the heap by itself.
93 schoenebeck 53 */
94     inline void fill_write_space_with_null() {
95     int w = atomic_read(&write_ptr),
96     r = atomic_read(&read_ptr);
97     memset(get_write_ptr(), 0, write_space_to_end());
98     if (r && w >= r) {
99     memset(get_buffer_begin(), 0, r - 1);
100     }
101    
102     // set the wrap space elements to null
103     if (wrap_elements) memset(&buf[size], 0, wrap_elements);
104     }
105    
106     __inline int read (T *dest, int cnt);
107     __inline int write (T *src, int cnt);
108    
109     inline int push(T* src) { return write(src,1); }
110     inline int pop(T* dst) { return read(dst,1); }
111    
112     __inline T *get_buffer_begin();
113    
114     __inline T *get_read_ptr(void) {
115     return(&buf[atomic_read(&read_ptr)]);
116     }
117    
118     /**
119     * Returns a pointer to the element from the current read position,
120     * advanced by \a offset elements.
121     */
122     /*inline T* get_read_ptr(int offset) {
123     int r = atomic_read(&read_ptr);
124     r += offset;
125     r &= size_mask;
126     return &buf[r];
127     }*/
128    
129     __inline T *get_write_ptr();
130     __inline void increment_read_ptr(int cnt) {
131     atomic_set(&read_ptr , (atomic_read(&read_ptr) + cnt) & size_mask);
132     }
133     __inline void set_read_ptr(int val) {
134     atomic_set(&read_ptr , val);
135     }
136    
137     __inline void increment_write_ptr(int cnt) {
138     atomic_set(&write_ptr, (atomic_read(&write_ptr) + cnt) & size_mask);
139     }
140    
141     /* this function increments the write_ptr by cnt, if the buffer wraps then
142     subtract size from the write_ptr value so that it stays within 0<write_ptr<size
143     use this function to increment the write ptr after you filled the buffer
144     with a number of elements given by write_space_to_end_with_wrap().
145     This ensures that the data that is written to the buffer fills up all
146     the wrap space that resides past the regular buffer. The wrap_space is needed for
147     interpolation. So that the audio thread sees the ringbuffer like a linear space
148     which allows us to use faster routines.
149     When the buffer wraps the wrap part is memcpy()ied to the beginning of the buffer
150     and the write ptr incremented accordingly.
151     */
152     __inline void increment_write_ptr_with_wrap(int cnt) {
153     int w=atomic_read(&write_ptr);
154     w += cnt;
155     if(w >= size) {
156     w -= size;
157 schoenebeck 970 copy(&buf[0], &buf[size], w);
158 schoenebeck 53 //printf("DEBUG !!!! increment_write_ptr_with_wrap: buffer wrapped, elements wrapped = %d (wrap_elements %d)\n",w,wrap_elements);
159     }
160     atomic_set(&write_ptr, w);
161     }
162    
163     /* this function returns the available write space in the buffer
164     when the read_ptr > write_ptr it returns the space inbetween, otherwise
165     when the read_ptr < write_ptr it returns the space between write_ptr and
166     the buffer end, including the wrap_space.
167     There is an exception to it. When read_ptr <= wrap_elements. In that
168     case we return the write space to buffer end (-1) without the wrap_elements,
169     this is needed because a subsequent increment_write_ptr which copies the
170     data that resides into the wrap space to the beginning of the buffer and increments
171     the write_ptr would cause the write_ptr overstepping the read_ptr which would be an error.
172     So basically the if(r<=wrap_elements) we return the buffer space to end - 1 which
173     ensures that at the next call there will be one element free to write before the buffer wraps
174     and usually (except in EOF situations) the next write_space_to_end_with_wrap() will return
175     1 + wrap_elements which ensures that the wrap part gets fully filled with data
176     */
177     __inline int write_space_to_end_with_wrap() {
178     int w, r;
179    
180     w = atomic_read(&write_ptr);
181     r = atomic_read(&read_ptr);
182     //printf("write_space_to_end: w=%d r=%d\n",w,r);
183     if(r > w) {
184     //printf("DEBUG: write_space_to_end_with_wrap: r>w r=%d w=%d val=%d\n",r,w,r - w - 1);
185     return(r - w - 1);
186     }
187     if(r <= wrap_elements) {
188     //printf("DEBUG: write_space_to_end_with_wrap: ATTENTION r <= wrap_elements: r=%d w=%d val=%d\n",r,w,size - w -1);
189     return(size - w -1);
190     }
191     if(r) {
192     //printf("DEBUG: write_space_to_end_with_wrap: r=%d w=%d val=%d\n",r,w,size - w + wrap_elements);
193     return(size - w + wrap_elements);
194     }
195     //printf("DEBUG: write_space_to_end_with_wrap: r=0 w=%d val=%d\n",w,size - w - 1 + wrap_elements);
196     return(size - w - 1 + wrap_elements);
197     }
198    
199     /* this function adjusts the number of elements to write into the ringbuffer
200     in a way that the size boundary is avoided and that the wrap space always gets
201     entirely filled.
202     cnt contains the write_space_to_end_with_wrap() amount while
203     capped_cnt contains a capped amount of samples to read.
204     normally capped_cnt == cnt but in some cases eg when the disk thread needs
205     to refill tracks with smaller blocks because lots of streams require immediate
206     refill because lots of notes were started simultaneously.
207     In that case we set for example capped_cnt to a fixed amount (< cnt, eg 64k),
208     which helps to reduce the buffer refill latencies that occur between streams.
209     the first if() checks if the current write_ptr + capped_cnt resides within
210     the wrap area but is < size+wrap_elements. in that case we cannot return
211     capped_cnt because it would lead to a write_ptr wrapping and only a partial fill
212     of wrap space which would lead to errors. So we simply return cnt which ensures
213     that the the entire wrap space will get filled correctly.
214     In all other cases (which are not problematic because no write_ptr wrapping
215     occurs) we simply return capped_cnt.
216     */
217     __inline int adjust_write_space_to_avoid_boundary(int cnt, int capped_cnt) {
218     int w;
219     w = atomic_read(&write_ptr);
220     if((w+capped_cnt) >= size && (w+capped_cnt) < (size+wrap_elements)) {
221     //printf("adjust_write_space_to_avoid_boundary returning cnt = %d\n",cnt);
222     return(cnt);
223     }
224     //printf("adjust_write_space_to_avoid_boundary returning capped_cnt = %d\n",capped_cnt);
225     return(capped_cnt);
226     }
227    
228     __inline int write_space_to_end() {
229     int w, r;
230    
231     w = atomic_read(&write_ptr);
232     r = atomic_read(&read_ptr);
233     //printf("write_space_to_end: w=%d r=%d\n",w,r);
234     if(r > w) return(r - w - 1);
235     if(r) return(size - w);
236     return(size - w - 1);
237     }
238    
239     __inline int read_space_to_end() {
240     int w, r;
241    
242     w = atomic_read(&write_ptr);
243     r = atomic_read(&read_ptr);
244     if(w >= r) return(w - r);
245     return(size - r);
246     }
247     __inline void init() {
248     atomic_set(&write_ptr, 0);
249     atomic_set(&read_ptr, 0);
250     // wrap=0;
251     }
252    
253     int write_space () {
254     int w, r;
255    
256     w = atomic_read(&write_ptr);
257     r = atomic_read(&read_ptr);
258    
259     if (w > r) {
260     return ((r - w + size) & size_mask) - 1;
261     } else if (w < r) {
262     return (r - w) - 1;
263     } else {
264     return size - 1;
265     }
266     }
267    
268     int read_space () {
269     int w, r;
270    
271     w = atomic_read(&write_ptr);
272     r = atomic_read(&read_ptr);
273    
274     if (w >= r) {
275     return w - r;
276     } else {
277     return (w - r + size) & size_mask;
278     }
279     }
280    
281     int size;
282     int wrap_elements;
283    
284 schoenebeck 243 /**
285     * Independent, random access reading from a RingBuffer. This class
286     * allows to read from a RingBuffer without being forced to free read
287     * data while reading / positioning.
288     */
289 schoenebeck 970 template<class T1, bool T1_DEEP_COPY>
290 schoenebeck 243 class _NonVolatileReader {
291     public:
292     int read_space() {
293     int r = read_ptr;
294     int w = atomic_read(&pBuf->write_ptr);
295     return (w >= r) ? w - r : (w - r + pBuf->size) & pBuf->size_mask;
296     }
297    
298     /**
299 schoenebeck 294 * Prefix decrement operator, for reducing NonVolatileReader's
300     * read position by one.
301     */
302     inline void operator--() {
303     if (read_ptr == atomic_read(&pBuf->read_ptr)) return; //TODO: or should we react oh this case (e.g. force segfault), as this is a very odd case?
304     --read_ptr & pBuf->size_mask;
305     }
306    
307     /**
308     * Postfix decrement operator, for reducing NonVolatileReader's
309     * read position by one.
310     */
311     inline void operator--(int) {
312     --*this;
313     }
314    
315     /**
316     * Returns pointer to the RingBuffer data of current
317     * NonVolatileReader's read position and increments
318     * NonVolatileReader's read position by one.
319     *
320     * @returns pointer to element of current read position
321     */
322     T* pop() {
323     if (!read_space()) return NULL;
324     T* pData = &pBuf->buf[read_ptr];
325     read_ptr++;
326     read_ptr &= pBuf->size_mask;
327     return pData;
328     }
329    
330     /**
331 schoenebeck 243 * Reads one element from the NonVolatileReader's current read
332     * position and copies it to the variable pointed by \a dst and
333     * finally increments the NonVolatileReader's read position by
334     * one.
335     *
336     * @param dst - where the element is copied to
337     * @returns 1 on success, 0 otherwise
338     */
339     int pop(T* dst) { return read(dst,1); }
340    
341     /**
342     * Reads \a cnt elements from the NonVolatileReader's current
343     * read position and copies it to the buffer pointed by \a dest
344     * and finally increments the NonVolatileReader's read position
345     * by the number of read elements.
346     *
347     * @param dest - destination buffer
348     * @param cnt - number of elements to read
349     * @returns number of read elements
350     */
351     int read(T* dest, int cnt) {
352     int free_cnt;
353     int cnt2;
354     int to_read;
355     int n1, n2;
356     int priv_read_ptr;
357    
358     priv_read_ptr = read_ptr;
359    
360     if ((free_cnt = read_space()) == 0) return 0;
361    
362     to_read = cnt > free_cnt ? free_cnt : cnt;
363    
364     cnt2 = priv_read_ptr + to_read;
365    
366     if (cnt2 > pBuf->size) {
367     n1 = pBuf->size - priv_read_ptr;
368     n2 = cnt2 & pBuf->size_mask;
369     } else {
370     n1 = to_read;
371     n2 = 0;
372     }
373    
374 schoenebeck 970 copy(dest, &pBuf->buf[priv_read_ptr], n1);
375 schoenebeck 243 priv_read_ptr = (priv_read_ptr + n1) & pBuf->size_mask;
376    
377     if (n2) {
378 schoenebeck 970 copy(dest+n1, pBuf->buf, n2);
379 schoenebeck 243 priv_read_ptr = n2;
380     }
381    
382     this->read_ptr = priv_read_ptr;
383     return to_read;
384     }
385 schoenebeck 294
386     /**
387     * Finally when the read data is not needed anymore, this method
388     * should be called to free the data in the RingBuffer up to the
389     * current read position of this NonVolatileReader.
390     *
391     * @see RingBuffer::increment_read_ptr()
392     */
393     void free() {
394     atomic_set(&pBuf->read_ptr, read_ptr);
395     }
396    
397 schoenebeck 243 protected:
398 schoenebeck 970 _NonVolatileReader(RingBuffer<T1,T1_DEEP_COPY>* pBuf) {
399 schoenebeck 243 this->pBuf = pBuf;
400     this->read_ptr = atomic_read(&pBuf->read_ptr);
401     }
402    
403 schoenebeck 970 RingBuffer<T1,T1_DEEP_COPY>* pBuf;
404 schoenebeck 243 int read_ptr;
405    
406 schoenebeck 970 friend class RingBuffer<T1,T1_DEEP_COPY>;
407 schoenebeck 243 };
408    
409 schoenebeck 970 typedef _NonVolatileReader<T,T_DEEP_COPY> NonVolatileReader;
410 schoenebeck 243
411     NonVolatileReader get_non_volatile_reader() { return NonVolatileReader(this); }
412    
413 schoenebeck 53 protected:
414     T *buf;
415     atomic_t write_ptr;
416     atomic_t read_ptr;
417     int size_mask;
418 schoenebeck 277
419 schoenebeck 970 /**
420     * Copies \a n amount of elements from the buffer given by
421     * \a pSrc to the buffer given by \a pDst.
422     */
423     inline static void copy(T* pDst, T* pSrc, int n);
424    
425     friend class _NonVolatileReader<T,T_DEEP_COPY>;
426 schoenebeck 53 };
427    
428 schoenebeck 970 template<class T, bool T_DEEP_COPY>
429     T* RingBuffer<T,T_DEEP_COPY>::get_write_ptr (void) {
430 schoenebeck 53 return(&buf[atomic_read(&write_ptr)]);
431     }
432    
433 schoenebeck 970 template<class T, bool T_DEEP_COPY>
434     T* RingBuffer<T,T_DEEP_COPY>::get_buffer_begin (void) {
435 schoenebeck 53 return(buf);
436     }
437    
438    
439    
440 schoenebeck 970 template<class T, bool T_DEEP_COPY>
441     int RingBuffer<T,T_DEEP_COPY>::read(T* dest, int cnt)
442 schoenebeck 53 {
443     int free_cnt;
444     int cnt2;
445     int to_read;
446     int n1, n2;
447     int priv_read_ptr;
448    
449     priv_read_ptr=atomic_read(&read_ptr);
450    
451     if ((free_cnt = read_space ()) == 0) {
452     return 0;
453     }
454    
455     to_read = cnt > free_cnt ? free_cnt : cnt;
456    
457     cnt2 = priv_read_ptr + to_read;
458    
459     if (cnt2 > size) {
460     n1 = size - priv_read_ptr;
461     n2 = cnt2 & size_mask;
462     } else {
463     n1 = to_read;
464     n2 = 0;
465     }
466    
467 schoenebeck 970 copy(dest, &buf[priv_read_ptr], n1);
468 schoenebeck 53 priv_read_ptr = (priv_read_ptr + n1) & size_mask;
469    
470     if (n2) {
471 schoenebeck 970 copy(dest+n1, buf, n2);
472 schoenebeck 53 priv_read_ptr = n2;
473     }
474    
475     atomic_set(&read_ptr, priv_read_ptr);
476     return to_read;
477     }
478    
479 schoenebeck 970 template<class T, bool T_DEEP_COPY>
480     int RingBuffer<T,T_DEEP_COPY>::write(T* src, int cnt)
481 schoenebeck 53 {
482     int free_cnt;
483     int cnt2;
484     int to_write;
485     int n1, n2;
486     int priv_write_ptr;
487    
488     priv_write_ptr=atomic_read(&write_ptr);
489    
490     if ((free_cnt = write_space ()) == 0) {
491     return 0;
492     }
493    
494     to_write = cnt > free_cnt ? free_cnt : cnt;
495    
496     cnt2 = priv_write_ptr + to_write;
497    
498     if (cnt2 > size) {
499     n1 = size - priv_write_ptr;
500     n2 = cnt2 & size_mask;
501     } else {
502     n1 = to_write;
503     n2 = 0;
504     }
505    
506 schoenebeck 970 copy(&buf[priv_write_ptr], src, n1);
507 schoenebeck 53 priv_write_ptr = (priv_write_ptr + n1) & size_mask;
508    
509     if (n2) {
510 schoenebeck 970 copy(buf, src+n1, n2);
511 schoenebeck 53 priv_write_ptr = n2;
512     }
513     atomic_set(&write_ptr, priv_write_ptr);
514     return to_write;
515     }
516    
517 schoenebeck 970 template<class T, bool T_DEEP_COPY>
518     void RingBuffer<T,T_DEEP_COPY>::copy(T* pDst, T* pSrc, int n) {
519     if (T_DEEP_COPY) { // deep copy - won't work for data structures without assignment operator implementation
520     for (int i = 0; i < n; i++) pDst[i] = pSrc[i];
521     } else { // flat copy - won't work for complex data structures !
522     memcpy(pDst, pSrc, n * sizeof(T));
523     }
524     }
525 schoenebeck 53
526     #endif /* RINGBUFFER_H */

  ViewVC Help
Powered by ViewVC