 * LinuxSampler - modular, streaming capable sampler                      *
 *                                                                        *
 * Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck    *
 * Copyright (C) 2005 - 2012 Christian Schoenebeck                        *
 *                                                                        *
 * This program is free software; you can redistribute it and/or modify   *
 * it under the terms of the GNU General Public License as published by   *
 * [...]
 */

#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#define DEFAULT_WRAP_ELEMENTS 0

#include <string.h>

#include "lsatomic.h"

using LinuxSampler::atomic;
using LinuxSampler::memory_order_relaxed;
using LinuxSampler::memory_order_acquire;
using LinuxSampler::memory_order_release;
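
// Memory-ordering convention used throughout this class: each thread
// publishes updates to its own pointer with a release store and observes
// the other thread's pointer with an acquire load, which guarantees that
// element data written before a pointer update is visible to the other
// thread once it sees the new pointer value. Loads of a thread's own
// pointer are relaxed, because only that thread ever modifies it.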

/** @brief Real-time safe and type safe RingBuffer implementation.
 *
 * This constant size buffer can be used to send data from exactly one
 * sender / writing thread to exactly one receiver / reading thread. It is
 * real-time safe due to the fact that data is only allocated when this
 * RingBuffer is created, and no system level mechanisms are used for
 * ensuring thread safety of this class.
 *
 * <b>Important:</b> There are two distinct behaviors of this RingBuffer,
 * selected by the template argument @c T_DEEP_COPY, which is a boolean
 * flag:
 *
 * - @c true: The RingBuffer will copy elements of type @c T by using type
 *   @c T's assignment operator. This behavior is mandatory for all data
 *   structures (classes) which additionally allocate memory on the heap.
 *   Type @c T needs to have an assignment operator implementation though,
 *   otherwise this will cause a compilation error. This behavior is
 *   safer, but usually slower (except for very small buffer sizes, where
 *   it might even be faster).
 * - @c false: The RingBuffer will copy elements of type @c T by flatly
 *   copying their structural data (i.e. with @c memcpy()) in one piece.
 *   This will only work if class @c T (and all of its subelements) does
 *   not allocate any additional data on the heap by itself. So use this
 *   option with great care, because otherwise it will result in very ugly
 *   behavior and crashes! For larger buffer sizes, this behavior will
 *   most probably be faster.
 */
template<class T, bool T_DEEP_COPY>
class RingBuffer
{
public:
    RingBuffer (int sz, int wrap_elements = DEFAULT_WRAP_ELEMENTS) :
        write_ptr(0), read_ptr(0)
    {
        _allocBuffer(sz, wrap_elements);
    }
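
    // Usage sketch (illustrative, not from the original header; 'MyEvent'
    // is a hypothetical class providing an assignment operator):
    //
    //   RingBuffer<float, false>  samples(1024); // POD type: flat memcpy()
    //   RingBuffer<MyEvent, true> events(64);    // deep copy via operator=
    //
    // The requested size is rounded up to the next power of two by
    // _allocBuffer() below.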

    /**
     * Resize this ring buffer to the given size. This operation
     * is not thread safe! Any operations using this RingBuffer
     * have to be stopped before calling this method.
     *
     * @param sz - new size (amount of elements)
     * @param wrap_elements - (optional) if supplied, the new amount of
     *                        wrap elements to be used beyond the official
     *                        buffer end; if not provided, the amount of
     *                        wrap elements remains as it was before
     */
    void resize(int sz, int wrap_elements = -1) {
        if (wrap_elements == -1)
            wrap_elements = this->wrap_elements;

        delete [] buf;

        _allocBuffer(sz, wrap_elements);
    }
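
    // Example (sketch only; stopThreads() / startThreads() stand in for
    // whatever mechanism the application uses to halt reader and writer):
    //
    //   stopThreads();          // hypothetical
    //   rb.resize(2048);        // wrap_elements stays as before
    //   rb.resize(4096, 128);   // and/or change the wrap space as well
    //   startThreads();         // hypothetical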

    virtual ~RingBuffer() {
        delete [] buf;
    }

    /**
     * Sets all remaining write space elements to zero. The write pointer
     * will currently not be incremented after, but that might change in
     * future.
     *
     * @e Caution: for @c T_DEEP_COPY=true you probably do @e NOT want
     * to call this method at all, at least not in case type @c T allocates
     * any additional data on the heap by itself.
     */
    inline void fill_write_space_with_null() {
        int w = write_ptr.load(memory_order_relaxed),
            r = read_ptr.load(memory_order_acquire);
        memset(get_write_ptr(), 0, sizeof(T)*write_space_to_end());
        if (r && w >= r) {
            memset(get_buffer_begin(), 0, sizeof(T)*(r - 1));
        }

        // set the wrap space elements to null
        if (wrap_elements) memset(&buf[size], 0, sizeof(T)*wrap_elements);
    }

    __inline int read (T *dest, int cnt);
    // [...]
    __inline T *get_buffer_begin();

    __inline T *get_read_ptr(void) {
        return(&buf[read_ptr.load(memory_order_relaxed)]);
    }

    /**
     * [...]
     * advanced by \a offset elements.
     */
    /*inline T* get_read_ptr(int offset) {
        int r = read_ptr.load(memory_order_relaxed);
        r += offset;
        r &= size_mask;
        return &buf[r];
    }*/

    __inline T *get_write_ptr();
    __inline void increment_read_ptr(int cnt) {
        read_ptr.store((read_ptr.load(memory_order_relaxed) + cnt) & size_mask, memory_order_release);
    }
    __inline void set_read_ptr(int val) {
        read_ptr.store(val, memory_order_release);
    }

    __inline void increment_write_ptr(int cnt) {
        write_ptr.store((write_ptr.load(memory_order_relaxed) + cnt) & size_mask, memory_order_release);
    }

    /* this function increments the write_ptr by cnt, if the buffer wraps then
       [...]
       and the write ptr incremented accordingly.
    */
    __inline void increment_write_ptr_with_wrap(int cnt) {
        int w = write_ptr.load(memory_order_relaxed);
        w += cnt;
        if(w >= size) {
            w -= size;
            copy(&buf[0], &buf[size], w);
            //printf("DEBUG !!!! increment_write_ptr_with_wrap: buffer wrapped, elements wrapped = %d (wrap_elements %d)\n",w,wrap_elements);
        }
        write_ptr.store(w, memory_order_release);
    }
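
    // Write-with-wrap sketch (illustrative; produce() is hypothetical and
    // the buffer is assumed to have been created with wrap_elements > 0):
    // the writer may stream one contiguous block past the official buffer
    // end into the wrap space; increment_write_ptr_with_wrap() then copies
    // the overhanging elements back to the buffer start and adjusts
    // write_ptr.
    //
    //   T*  p = rb.get_write_ptr();
    //   int n = rb.write_space_to_end_with_wrap();
    //   produce(p, n);
    //   rb.increment_write_ptr_with_wrap(n);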

    /* this function returns the available write space in the buffer
       [...] */
    __inline int write_space_to_end_with_wrap() {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);
        //printf("write_space_to_end: w=%d r=%d\n",w,r);
        if(r > w) {
            //printf("DEBUG: write_space_to_end_with_wrap: r>w r=%d w=%d val=%d\n",r,w,r - w - 1);
            // [...]
        }
        // [...]
    }

    /* [...]
    */
    __inline int adjust_write_space_to_avoid_boundary(int cnt, int capped_cnt) {
        int w;
        w = write_ptr.load(memory_order_relaxed);
        if((w+capped_cnt) >= size && (w+capped_cnt) < (size+wrap_elements)) {
            //printf("adjust_write_space_to_avoid_boundary returning cnt = %d\n",cnt);
            return(cnt);
        }
        // [...]
    }

    __inline int write_space_to_end() {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);
        //printf("write_space_to_end: w=%d r=%d\n",w,r);
        if(r > w) return(r - w - 1);
        if(r) return(size - w);
        // [...]
    }

    __inline int read_space_to_end() {
        int w, r;

        w = write_ptr.load(memory_order_acquire);
        r = read_ptr.load(memory_order_relaxed);
        if(w >= r) return(w - r);
        return(size - r);
    }

    __inline void init() {
        write_ptr.store(0, memory_order_relaxed);
        read_ptr.store(0, memory_order_relaxed);
        // wrap=0;
    }

    int write_space () {
        int w, r;

        w = write_ptr.load(memory_order_relaxed);
        r = read_ptr.load(memory_order_acquire);

        if (w > r) {
            return ((r - w + size) & size_mask) - 1;
        }
        // [...]
    }

    int read_space () {
        int w, r;

        w = write_ptr.load(memory_order_acquire);
        r = read_ptr.load(memory_order_relaxed);

        if (w >= r) {
            return w - r;
        }
        // [...]
    }

    /** [...]
     * allows reading from a RingBuffer without being forced to free read
     * data while reading / positioning.
     */
    template<class T1, bool T1_DEEP_COPY>
    class _NonVolatileReader {
        public:
            int read_space() {
                int r = read_ptr;
                int w = pBuf->write_ptr.load(memory_order_acquire);
                return (w >= r) ? w - r : (w - r + pBuf->size) & pBuf->size_mask;
            }

            /**
             * [...]
             * read position by one.
             */
            inline void operator--() {
                if (read_ptr == pBuf->read_ptr.load(memory_order_relaxed)) return; //TODO: or should we react on this case (e.g. force segfault), as this is a very odd case?
                read_ptr = (read_ptr-1) & pBuf->size_mask;
            }

            /**
             * [...]
             */
            inline void operator--(int) {
                --*this;
            }

            /**
             * "Increment assign" operator, for advancing NonVolatileReader's
             * read position by @a n elements.
             *
             * @param n - amount of elements to advance read position
             */
            inline void operator+=(int n) {
                if (read_space() < n) return;
                read_ptr = (read_ptr+n) & pBuf->size_mask;
            }

            /**
             * Returns pointer to the RingBuffer data of current
             * [...]
             */
            // [...]
                    n2 = 0;
                }

                copy(dest, &pBuf->buf[priv_read_ptr], n1);
                priv_read_ptr = (priv_read_ptr + n1) & pBuf->size_mask;

                if (n2) {
                    copy(dest+n1, pBuf->buf, n2);
                    priv_read_ptr = n2;
                }
            // [...]

            /** [...]
             * @see RingBuffer::increment_read_ptr()
             */
            void free() {
                pBuf->read_ptr.store(read_ptr, memory_order_release);
            }

        protected:
            _NonVolatileReader(RingBuffer<T1,T1_DEEP_COPY>* pBuf) {
                this->pBuf = pBuf;
                this->read_ptr = pBuf->read_ptr.load(memory_order_relaxed);
            }

            RingBuffer<T1,T1_DEEP_COPY>* pBuf;
            int read_ptr;

            friend class RingBuffer<T1,T1_DEEP_COPY>;
    };

    typedef _NonVolatileReader<T,T_DEEP_COPY> NonVolatileReader;

    NonVolatileReader get_non_volatile_reader() { return NonVolatileReader(this); }
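
    // Peek-then-consume sketch (illustrative; handle() is hypothetical and
    // the reader's exact read/pop API is partly elided above):
    //
    //   RingBuffer<MyEvent,true>::NonVolatileReader reader =
    //       rb.get_non_volatile_reader();
    //   while (reader.read_space() > 0) {
    //       MyEvent e;
    //       reader.read(&e, 1);  // advances only the reader's private position
    //       if (!handle(e)) break;
    //   }
    //   reader.free();           // only now is the read space actually freed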

protected:
    T *buf;
    atomic<int> write_ptr;
    atomic<int> read_ptr;
    int size_mask;

    /**
     * Copies \a n amount of elements from the buffer given by
     * \a pSrc to the buffer given by \a pDst.
     */
    inline static void copy(T* pDst, T* pSrc, int n);

    void _allocBuffer(int sz, int wrap_elements) {
        this->wrap_elements = wrap_elements;

        // the write-with-wrap functions need wrap_elements extra
        // space in the buffer to be able to copy the wrap space
        sz += wrap_elements;

        // round the buffer size up to the next power of two, so that
        // read / write positions can wrap with a cheap bit mask
        int power_of_two;
        for (power_of_two = 1;
             1<<power_of_two < sz;
             power_of_two++);

        size = 1<<power_of_two;
        size_mask = size;
        size_mask -= 1;
        buf = new T[size + wrap_elements];
    }
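
    // Sizing example (illustrative numbers): _allocBuffer(1000, 24) first
    // adds the wrap space (sz = 1024), rounds up to the next power of two
    // (size = 1024, size_mask = 1023), and allocates 1024 + 24 elements;
    // masking an index with size_mask then wraps it into [0, 1023].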

    friend class _NonVolatileReader<T,T_DEEP_COPY>;
};

template<class T, bool T_DEEP_COPY>
T* RingBuffer<T,T_DEEP_COPY>::get_write_ptr (void) {
    return(&buf[write_ptr.load(memory_order_relaxed)]);
}

template<class T, bool T_DEEP_COPY>
T* RingBuffer<T,T_DEEP_COPY>::get_buffer_begin (void) {
    return(buf);
}

template<class T, bool T_DEEP_COPY>
int RingBuffer<T,T_DEEP_COPY>::read(T* dest, int cnt)
{
    int free_cnt;
    int cnt2;
    int to_read;
    int n1, n2;
    int priv_read_ptr;

    priv_read_ptr = read_ptr.load(memory_order_relaxed);

    if ((free_cnt = read_space ()) == 0) {
        return 0;
    }
    // [...]
        n2 = 0;
    }

    copy(dest, &buf[priv_read_ptr], n1);
    priv_read_ptr = (priv_read_ptr + n1) & size_mask;

    if (n2) {
        copy(dest+n1, buf, n2);
        priv_read_ptr = n2;
    }

    read_ptr.store(priv_read_ptr, memory_order_release);
    return to_read;
}

template<class T, bool T_DEEP_COPY>
int RingBuffer<T,T_DEEP_COPY>::write(T* src, int cnt)
{
    int free_cnt;
    int cnt2;
    int to_write;
    int n1, n2;
    int priv_write_ptr;

    priv_write_ptr = write_ptr.load(memory_order_relaxed);

    if ((free_cnt = write_space ()) == 0) {
        return 0;
    }
    // [...]
        n2 = 0;
    }

    copy(&buf[priv_write_ptr], src, n1);
    priv_write_ptr = (priv_write_ptr + n1) & size_mask;

    if (n2) {
        copy(buf, src+n1, n2);
        priv_write_ptr = n2;
    }
    write_ptr.store(priv_write_ptr, memory_order_release);
    return to_write;
}

template<class T, bool T_DEEP_COPY>
void RingBuffer<T,T_DEEP_COPY>::copy(T* pDst, T* pSrc, int n) {
    if (T_DEEP_COPY) { // deep copy - won't work for data structures without assignment operator implementation
        for (int i = 0; i < n; i++) pDst[i] = pSrc[i];
    } else { // flat copy - won't work for complex data structures!
        memcpy(pDst, pSrc, n * sizeof(T));
    }
}
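
// End-to-end usage sketch (illustrative; the produce()/consume() helpers
// are hypothetical, and each call returns how many elements were actually
// transferred, which may be less than requested):
//
//   RingBuffer<float, false> rb(4096);
//
//   // writer thread:
//   float in[256];
//   produce(in, 256);
//   int written = rb.write(in, 256);
//
//   // reader thread:
//   float out[256];
//   int got = rb.read(out, 256);
//   consume(out, got);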

#endif /* RINGBUFFER_H */