1 |
schoenebeck |
5 |
/*************************************************************************** |
2 |
|
|
* * |
3 |
|
|
* LinuxSampler - modular, streaming capable sampler * |
4 |
|
|
* * |
5 |
|
|
* Copyright (C) 2003 by Benno Senoner and Christian Schoenebeck * |
6 |
|
|
* * |
7 |
|
|
* This program is free software; you can redistribute it and/or modify * |
8 |
|
|
* it under the terms of the GNU General Public License as published by * |
9 |
|
|
* the Free Software Foundation; either version 2 of the License, or * |
10 |
|
|
* (at your option) any later version. * |
11 |
|
|
* * |
12 |
|
|
* This program is distributed in the hope that it will be useful, * |
13 |
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of * |
14 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * |
15 |
|
|
* GNU General Public License for more details. * |
16 |
|
|
* * |
17 |
|
|
* You should have received a copy of the GNU General Public License * |
18 |
|
|
* along with this program; if not, write to the Free Software * |
19 |
|
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, * |
20 |
|
|
* MA 02111-1307 USA * |
21 |
|
|
***************************************************************************/ |
22 |
|
|
|
23 |
|
|
#ifndef RINGBUFFER_H |
24 |
|
|
#define RINGBUFFER_H |
25 |
|
|
|
26 |
|
|
#define DEFAULT_WRAP_ELEMENTS 1024 |
27 |
|
|
|
28 |
|
|
#include <string.h> |
29 |
|
|
#include <stdio.h> |
30 |
|
|
#include <asm/atomic.h> |
31 |
|
|
|
32 |
|
|
#include <sys/types.h> |
33 |
|
|
#include <pthread.h> |
34 |
|
|
|
35 |
|
|
template<class T> |
36 |
|
|
class RingBuffer |
37 |
|
|
{ |
38 |
|
|
public: |
39 |
|
|
RingBuffer (int sz, int wrap_elements = DEFAULT_WRAP_ELEMENTS) { |
40 |
|
|
int power_of_two; |
41 |
|
|
|
42 |
|
|
this->wrap_elements = wrap_elements; |
43 |
|
|
|
44 |
|
|
for (power_of_two = 1; |
45 |
|
|
1<<power_of_two < sz; |
46 |
|
|
power_of_two++); |
47 |
|
|
|
48 |
|
|
size = 1<<power_of_two; |
49 |
|
|
size_mask = size; |
50 |
|
|
size_mask -= 1; |
51 |
|
|
atomic_set(&write_ptr, 0); |
52 |
|
|
atomic_set(&read_ptr, 0); |
53 |
|
|
buf = new T[size + wrap_elements]; |
54 |
|
|
}; |
55 |
|
|
|
56 |
|
|
virtual ~RingBuffer() { |
57 |
|
|
delete [] buf; |
58 |
|
|
} |
59 |
|
|
|
60 |
|
|
/** |
61 |
|
|
* Sets all remaining write space elements to zero. The write pointer |
62 |
|
|
* will currently not be incremented after, but that might change in |
63 |
|
|
* future. |
64 |
|
|
*/ |
65 |
|
|
inline void fill_write_space_with_null() { |
66 |
|
|
int w = atomic_read(&write_ptr), |
67 |
|
|
r = atomic_read(&read_ptr); |
68 |
|
|
memset(get_write_ptr(), 0, write_space_to_end()); |
69 |
|
|
if (r && w >= r) { |
70 |
|
|
memset(get_buffer_begin(), 0, r - 1); |
71 |
|
|
} |
72 |
|
|
|
73 |
|
|
// set the wrap space elements to null |
74 |
|
|
if (wrap_elements) memset(&buf[size], 0, wrap_elements); |
75 |
|
|
} |
76 |
|
|
|
77 |
|
|
__inline int read (T *dest, int cnt); |
78 |
|
|
__inline int write (T *src, int cnt); |
79 |
|
|
__inline T *get_buffer_begin(); |
80 |
|
|
|
81 |
|
|
__inline T *get_read_ptr(void) { |
82 |
|
|
return(&buf[atomic_read(&read_ptr)]); |
83 |
|
|
} |
84 |
|
|
|
85 |
|
|
__inline T *get_write_ptr(); |
86 |
|
|
__inline void increment_read_ptr(int cnt) { |
87 |
|
|
atomic_set(&read_ptr , (atomic_read(&read_ptr) + cnt) & size_mask); |
88 |
|
|
} |
89 |
|
|
__inline void set_read_ptr(int val) { |
90 |
|
|
atomic_set(&read_ptr , val); |
91 |
|
|
} |
92 |
|
|
|
93 |
|
|
__inline void increment_write_ptr(int cnt) { |
94 |
|
|
atomic_set(&write_ptr, (atomic_read(&write_ptr) + cnt) & size_mask); |
95 |
|
|
} |
96 |
|
|
|
97 |
|
|
/* this function increments the write_ptr by cnt, if the buffer wraps then |
98 |
|
|
subtract size from the write_ptr value so that it stays within 0<write_ptr<size |
99 |
|
|
use this function to increment the write ptr after you filled the buffer |
100 |
|
|
with a number of elements given by write_space_to_end_with_wrap(). |
101 |
|
|
This ensures that the data that is written to the buffer fills up all |
102 |
|
|
the wrap space that resides past the regular buffer. The wrap_space is needed for |
103 |
|
|
interpolation. So that the audio thread sees the ringbuffer like a linear space |
104 |
|
|
which allows us to use faster routines. |
105 |
|
|
When the buffer wraps the wrap part is memcpy()ied to the beginning of the buffer |
106 |
|
|
and the write ptr incremented accordingly. |
107 |
|
|
*/ |
108 |
|
|
__inline void increment_write_ptr_with_wrap(int cnt) { |
109 |
|
|
int w=atomic_read(&write_ptr); |
110 |
|
|
w += cnt; |
111 |
|
|
if(w >= size) { |
112 |
|
|
w -= size; |
113 |
|
|
memcpy(&buf[0], &buf[size], w*sizeof(T)); |
114 |
|
|
//printf("DEBUG !!!! increment_write_ptr_with_wrap: buffer wrapped, elements wrapped = %d (wrap_elements %d)\n",w,wrap_elements); |
115 |
|
|
} |
116 |
|
|
atomic_set(&write_ptr, w); |
117 |
|
|
} |
118 |
|
|
|
119 |
|
|
/* this function returns the available write space in the buffer |
120 |
|
|
when the read_ptr > write_ptr it returns the space inbetween, otherwise |
121 |
|
|
when the read_ptr < write_ptr it returns the space between write_ptr and |
122 |
|
|
the buffer end, including the wrap_space. |
123 |
|
|
There is an exception to it. When read_ptr <= wrap_elements. In that |
124 |
|
|
case we return the write space to buffer end (-1) without the wrap_elements, |
125 |
|
|
this is needed because a subsequent increment_write_ptr which copies the |
126 |
|
|
data that resides into the wrap space to the beginning of the buffer and increments |
127 |
|
|
the write_ptr would cause the write_ptr overstepping the read_ptr which would be an error. |
128 |
|
|
So basically the if(r<=wrap_elements) we return the buffer space to end - 1 which |
129 |
|
|
ensures that at the next call there will be one element free to write before the buffer wraps |
130 |
|
|
and usually (except in EOF situations) the next write_space_to_end_with_wrap() will return |
131 |
|
|
1 + wrap_elements which ensures that the wrap part gets fully filled with data |
132 |
|
|
*/ |
133 |
|
|
__inline int write_space_to_end_with_wrap() { |
134 |
|
|
int w, r; |
135 |
|
|
|
136 |
|
|
w = atomic_read(&write_ptr); |
137 |
|
|
r = atomic_read(&read_ptr); |
138 |
|
|
//printf("write_space_to_end: w=%d r=%d\n",w,r); |
139 |
|
|
if(r > w) { |
140 |
|
|
//printf("DEBUG: write_space_to_end_with_wrap: r>w r=%d w=%d val=%d\n",r,w,r - w - 1); |
141 |
|
|
return(r - w - 1); |
142 |
|
|
} |
143 |
|
|
if(r <= wrap_elements) { |
144 |
|
|
//printf("DEBUG: write_space_to_end_with_wrap: ATTENTION r <= wrap_elements: r=%d w=%d val=%d\n",r,w,size - w -1); |
145 |
|
|
return(size - w -1); |
146 |
|
|
} |
147 |
|
|
if(r) { |
148 |
|
|
//printf("DEBUG: write_space_to_end_with_wrap: r=%d w=%d val=%d\n",r,w,size - w + wrap_elements); |
149 |
|
|
return(size - w + wrap_elements); |
150 |
|
|
} |
151 |
|
|
//printf("DEBUG: write_space_to_end_with_wrap: r=0 w=%d val=%d\n",w,size - w - 1 + wrap_elements); |
152 |
|
|
return(size - w - 1 + wrap_elements); |
153 |
|
|
} |
154 |
|
|
|
155 |
|
|
/* this function adjusts the number of elements to write into the ringbuffer |
156 |
|
|
in a way that the size boundary is avoided and that the wrap space always gets |
157 |
|
|
entirely filled. |
158 |
|
|
cnt contains the write_space_to_end_with_wrap() amount while |
159 |
|
|
capped_cnt contains a capped amount of samples to read. |
160 |
|
|
normally capped_cnt == cnt but in some cases eg when the disk thread needs |
161 |
|
|
to refill tracks with smaller blocks because lots of streams require immediate |
162 |
|
|
refill because lots of notes were started simultaneously. |
163 |
|
|
In that case we set for example capped_cnt to a fixed amount (< cnt, eg 64k), |
164 |
|
|
which helps to reduce the buffer refill latencies that occur between streams. |
165 |
|
|
the first if() checks if the current write_ptr + capped_cnt resides within |
166 |
|
|
the wrap area but is < size+wrap_elements. in that case we cannot return |
167 |
|
|
capped_cnt because it would lead to a write_ptr wrapping and only a partial fill |
168 |
|
|
of wrap space which would lead to errors. So we simply return cnt which ensures |
169 |
|
|
that the the entire wrap space will get filled correctly. |
170 |
|
|
In all other cases (which are not problematic because no write_ptr wrapping |
171 |
|
|
occurs) we simply return capped_cnt. |
172 |
|
|
*/ |
173 |
|
|
__inline int adjust_write_space_to_avoid_boundary(int cnt, int capped_cnt) { |
174 |
|
|
int w; |
175 |
|
|
w = atomic_read(&write_ptr); |
176 |
|
|
if((w+capped_cnt) >= size && (w+capped_cnt) < (size+wrap_elements)) { |
177 |
|
|
//printf("adjust_write_space_to_avoid_boundary returning cnt = %d\n",cnt); |
178 |
|
|
return(cnt); |
179 |
|
|
} |
180 |
|
|
//printf("adjust_write_space_to_avoid_boundary returning capped_cnt = %d\n",capped_cnt); |
181 |
|
|
return(capped_cnt); |
182 |
|
|
} |
183 |
|
|
|
184 |
|
|
__inline int write_space_to_end() { |
185 |
|
|
int w, r; |
186 |
|
|
|
187 |
|
|
w = atomic_read(&write_ptr); |
188 |
|
|
r = atomic_read(&read_ptr); |
189 |
|
|
//printf("write_space_to_end: w=%d r=%d\n",w,r); |
190 |
|
|
if(r > w) return(r - w - 1); |
191 |
|
|
if(r) return(size - w); |
192 |
|
|
return(size - w - 1); |
193 |
|
|
} |
194 |
|
|
|
195 |
|
|
__inline int read_space_to_end() { |
196 |
|
|
int w, r; |
197 |
|
|
|
198 |
|
|
w = atomic_read(&write_ptr); |
199 |
|
|
r = atomic_read(&read_ptr); |
200 |
|
|
if(w >= r) return(w - r); |
201 |
|
|
return(size - r); |
202 |
|
|
} |
203 |
|
|
__inline void init() { |
204 |
|
|
atomic_set(&write_ptr, 0); |
205 |
|
|
atomic_set(&read_ptr, 0); |
206 |
|
|
// wrap=0; |
207 |
|
|
} |
208 |
|
|
|
209 |
|
|
int write_space () { |
210 |
|
|
int w, r; |
211 |
|
|
|
212 |
|
|
w = atomic_read(&write_ptr); |
213 |
|
|
r = atomic_read(&read_ptr); |
214 |
|
|
|
215 |
|
|
if (w > r) { |
216 |
|
|
return ((r - w + size) & size_mask) - 1; |
217 |
|
|
} else if (w < r) { |
218 |
|
|
return (r - w) - 1; |
219 |
|
|
} else { |
220 |
|
|
return size - 1; |
221 |
|
|
} |
222 |
|
|
} |
223 |
|
|
|
224 |
|
|
int read_space () { |
225 |
|
|
int w, r; |
226 |
|
|
|
227 |
|
|
w = atomic_read(&write_ptr); |
228 |
|
|
r = atomic_read(&read_ptr); |
229 |
|
|
|
230 |
|
|
if (w >= r) { |
231 |
|
|
return w - r; |
232 |
|
|
} else { |
233 |
|
|
return (w - r + size) & size_mask; |
234 |
|
|
} |
235 |
|
|
} |
236 |
|
|
|
237 |
|
|
int size; |
238 |
|
|
int wrap_elements; |
239 |
|
|
|
240 |
|
|
protected: |
241 |
|
|
T *buf; |
242 |
|
|
atomic_t write_ptr; |
243 |
|
|
atomic_t read_ptr; |
244 |
|
|
int size_mask; |
245 |
|
|
}; |
246 |
|
|
|
247 |
|
|
template<class T> T * |
248 |
|
|
RingBuffer<T>::get_write_ptr (void) { |
249 |
|
|
return(&buf[atomic_read(&write_ptr)]); |
250 |
|
|
} |
251 |
|
|
|
252 |
|
|
template<class T> T * |
253 |
|
|
RingBuffer<T>::get_buffer_begin (void) { |
254 |
|
|
return(buf); |
255 |
|
|
} |
256 |
|
|
|
257 |
|
|
|
258 |
|
|
|
259 |
|
|
/* Copies up to 'cnt' elements from the ring buffer into 'dest' and
   advances the read pointer. Returns the number of elements actually
   copied (0 if the buffer is empty). The read pointer is published
   atomically only once, after all data has been copied, which keeps a
   concurrent single producer from overwriting unread elements. */
template<class T> int
RingBuffer<T>::read (T *dest, int cnt)

{
  int free_cnt;
  int cnt2;
  int to_read;
  int n1, n2;
  int priv_read_ptr;

  // work on a private copy of the read pointer until the copy is done
  priv_read_ptr=atomic_read(&read_ptr);

  if ((free_cnt = read_space ()) == 0) {
    return 0;
  }

  // never read more than is currently available
  to_read = cnt > free_cnt ? free_cnt : cnt;

  cnt2 = priv_read_ptr + to_read;

  if (cnt2 > size) {
    // the requested range wraps past the buffer end: split into two copies
    n1 = size - priv_read_ptr;
    n2 = cnt2 & size_mask;
  } else {
    n1 = to_read;
    n2 = 0;
  }

  memcpy (dest, &buf[priv_read_ptr], n1 * sizeof (T));
  priv_read_ptr = (priv_read_ptr + n1) & size_mask;

  if (n2) {
    // second part starts at the beginning of the buffer
    memcpy (dest+n1, buf, n2 * sizeof (T));
    priv_read_ptr = n2;
  }

  // publish the new read position only after the data was copied out
  atomic_set(&read_ptr, priv_read_ptr);
  return to_read;
}
298 |
|
|
|
299 |
|
|
/* Copies up to 'cnt' elements from 'src' into the ring buffer and
   advances the write pointer. Returns the number of elements actually
   stored (0 if the buffer is full). The write pointer is published
   atomically only once, after all data has been copied, which keeps a
   concurrent single consumer from reading incomplete elements. */
template<class T> int
RingBuffer<T>::write (T *src, int cnt)

{
  int free_cnt;
  int cnt2;
  int to_write;
  int n1, n2;
  int priv_write_ptr;

  // work on a private copy of the write pointer until the copy is done
  priv_write_ptr=atomic_read(&write_ptr);

  if ((free_cnt = write_space ()) == 0) {
    return 0;
  }

  // never write more than the currently free space
  to_write = cnt > free_cnt ? free_cnt : cnt;

  cnt2 = priv_write_ptr + to_write;

  if (cnt2 > size) {
    // the destination range wraps past the buffer end: split into two copies
    n1 = size - priv_write_ptr;
    n2 = cnt2 & size_mask;
  } else {
    n1 = to_write;
    n2 = 0;
  }

  memcpy (&buf[priv_write_ptr], src, n1 * sizeof (T));
  priv_write_ptr = (priv_write_ptr + n1) & size_mask;

  if (n2) {
    // second part goes to the beginning of the buffer
    memcpy (buf, src+n1, n2 * sizeof (T));
    priv_write_ptr = n2;
  }
  // publish the new write position only after the data was copied in
  atomic_set(&write_ptr, priv_write_ptr);
  return to_write;
}
337 |
|
|
|
338 |
|
|
|
339 |
|
|
#endif /* RINGBUFFER_H */ |