Lockfree ringbuffer review
- From: Stefan Westerfeld <stefan space twc de>
- To: beast gnome org
- Cc: timj gtk org
- Subject: Lockfree ringbuffer review
- Date: Tue, 30 May 2006 14:26:20 +0200
Hi!
I've just completed writing a lock free ringbuffer to get a JACK driver
implemented. The only special thing is that it is frame based, because I
saw that the JACK driver code itself really got messy if you always have
to ensure that you don't accidentally read or write half frames from/to
the ringbuffer.
So here you can create a ringbuffer with 1024 frames of float audio
data, where each frame contains two floats (for the channels), by
constructing a FrameRingBuffer<float> (1024, 2), and the ringbuffer
itself will ensure that you only deal with complete frames afterwards.
I would really like to get some review for the code. It works within the
JACK driver, but that of course doesn't say that it's bug-free.
/**
 * FrameRingBuffer - lock-free ring buffer of fixed-size audio frames.
 *
 * Each frame consists of elements_per_frame elements of type T (e.g. one
 * float per channel), and all read/write operations work on whole frames
 * only, so callers can never transfer half a frame.
 *
 * Concurrency contract (as implied by the design, not enforced by code):
 * read_frame_pos is written only by read()/clear(), write_frame_pos only
 * by write()/clear(); the separate atomic positions with one spare frame
 * (see resize()) are the classic single-reader/single-writer ring buffer
 * scheme.
 * NOTE(review): presumably exactly one reader thread and one writer
 * thread are allowed — verify against callers.
 * NOTE(review): safety on weakly-ordered CPUs requires that
 * Atomic::int_get act as an acquire and Atomic::int_set as a release
 * (i.e. the data copy must not be reordered past the position update) —
 * confirm the Atomic implementation provides these barriers.
 */
template<class T>
class FrameRingBuffer {
  //BIRNET_PRIVATE_COPY (FrameRingBuffer);
private:
  vector<T> buffer;                   /* storage for (size() + 1) frames; one frame stays unused (see write_space()) */
  typedef typename vector<T>::iterator BufferIterator;
  int read_frame_pos;                 /* next frame index to read; advanced only by the reader */
  int write_frame_pos;                /* next frame index to write; advanced only by the writer */
  guint elements_per_frame;           /* number of T elements per frame */
public:
  /**
   * constructs a ringbuffer holding n_frames frames of
   * elements_per_frame elements each (delegates to resize())
   */
  FrameRingBuffer (guint n_frames = 0,
                   guint elements_per_frame = 1)
  {
    resize (n_frames, elements_per_frame);
  }
  /**
   * checks available read space in the ringbuffer
   *
   * @returns the number of frames that are available for reading
   */
  guint
  read_space()
  {
    int wpos = Atomic::int_get (&write_frame_pos);
    int rpos = Atomic::int_get (&read_frame_pos);
    int size = buffer.size() / elements_per_frame;
    /* unwrap wpos so the subtraction below is non-negative */
    if (wpos < rpos) /* wpos == rpos -> empty ringbuffer */
      wpos += size;
    return wpos - rpos;
  }
  /**
   * reads data from the ringbuffer
   *
   * copies at most n_frames frames into the caller-supplied array
   * frames, which must have room for n_frames * elements_per_frame
   * elements of T
   *
   * @returns the number of successfully read frames
   */
  guint
  read (guint n_frames, T* frames)
  {
    int rpos = Atomic::int_get (&read_frame_pos);
    guint size = buffer.size() / elements_per_frame;
    guint can_read = min (read_space(), n_frames);
    /* first chunk: from rpos up to the physical end of the buffer */
    BufferIterator start = buffer.begin() + rpos * elements_per_frame;
    guint read1 = min (can_read, size - rpos) * elements_per_frame;
    copy (start, start + read1, frames);
    /* second chunk: wrap around to the buffer start (read2 == 0 if no wrap) */
    guint read2 = can_read * elements_per_frame - read1;
    copy (buffer.begin(), buffer.begin() + read2, frames + read1);
    /* publish the new read position only after the data has been copied out */
    Atomic::int_set (&read_frame_pos, (rpos + can_read) % size);
    return can_read;
  }
  /**
   * checks available write space in the ringbuffer
   *
   * @returns the number of frames that can be written
   */
  guint
  write_space()
  {
    int wpos = Atomic::int_get (&write_frame_pos);
    int rpos = Atomic::int_get (&read_frame_pos);
    int size = buffer.size() / elements_per_frame;
    /* unwrap rpos so the subtraction below is non-negative */
    if (rpos <= wpos) /* wpos == rpos -> empty ringbuffer */
      rpos += size;
    /* the extra element allows us to see the difference between
     * - empty ringbuffer
     * - full ringbuffer
     */
    return rpos - wpos - 1;
  }
  /**
   * writes data to the ringbuffer
   *
   * copies at most n_frames frames from the caller-supplied array
   * frames, which must contain n_frames * elements_per_frame elements
   * of T
   *
   * @returns the number of successfully written frames
   */
  guint
  write (guint n_frames, const T* frames)
  {
    int wpos = Atomic::int_get (&write_frame_pos);
    guint size = buffer.size() / elements_per_frame;
    guint can_write = min (write_space(), n_frames);
    /* first chunk: from wpos up to the physical end of the buffer */
    BufferIterator start = buffer.begin() + wpos * elements_per_frame;
    guint write1 = min (can_write, size - wpos) * elements_per_frame;
    copy (frames, frames + write1, start);
    /* second chunk: wrap around to the buffer start (write2 == 0 if no wrap) */
    guint write2 = can_write * elements_per_frame - write1;
    copy (frames + write1, frames + write1 + write2, buffer.begin());
    /* publish the new write position only after the data has been copied in */
    Atomic::int_set (&write_frame_pos, (wpos + can_write) % size);
    return can_write;
  }
  /**
   * returns the maximum number of frames that the ringbuffer can contain
   *
   * one frame of the underlying storage is reserved to distinguish a
   * full ringbuffer from an empty one, hence the "- 1"
   */
  guint
  size() const
  {
    return (buffer.size() - 1) / elements_per_frame;
  }
  /**
   * clears the ringbuffer by resetting both positions to zero
   *
   * Note: this function is NOT threadsafe; no reader or writer may be
   * active while it runs.
   */
  void
  clear()
  {
    Atomic::int_set (&read_frame_pos, 0);
    Atomic::int_set (&write_frame_pos, 0);
  }
  /**
   * resizes and clears the ringbuffer
   *
   * allocates one frame more than requested so that write_space()/size()
   * can tell a full buffer apart from an empty one
   *
   * Note: this function is NOT threadsafe; no reader or writer may be
   * active while it runs.
   */
  void
  resize (guint n_frames,
          guint elements_per_frame = 1)
  {
    this->elements_per_frame = elements_per_frame;
    buffer.resize ((n_frames + 1) * elements_per_frame);
    clear();
  }
};
Cu... Stefan
--
Stefan Westerfeld, Hamburg/Germany, http://space.twc.de/~stefan
[
Date Prev][
Date Next] [
Thread Prev][
Thread Next]
[
Thread Index]
[
Date Index]
[
Author Index]