author    Martin Sustrik <sustrik@250bpm.com>    2010-03-09 08:43:20 +0100
committer Martin Sustrik <sustrik@250bpm.com>    2010-03-09 08:43:20 +0100
commit    e04e2cdbbaf351eb04164bdcd293fcb8fa22a9a4 (patch)
tree      419a8ff7db0495de57716d4595dc3d235c6ad6f6 /src/yqueue.hpp
parent    9481c69b0f60068f12aa26699588fed6a8faceec (diff)
rollback functionality added to pipe
Diffstat (limited to 'src/yqueue.hpp')
-rw-r--r--  src/yqueue.hpp  37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/src/yqueue.hpp b/src/yqueue.hpp
index 28b5fdd..9eaceb5 100644
--- a/src/yqueue.hpp
+++ b/src/yqueue.hpp
@@ -102,20 +102,54 @@ namespace zmq
             chunk_t *sc = spare_chunk.xchg (NULL);
             if (sc) {
                 end_chunk->next = sc;
+                sc->prev = end_chunk;
             } else {
                 end_chunk->next = (chunk_t*) malloc (sizeof (chunk_t));
                 zmq_assert (end_chunk->next);
+                end_chunk->next->prev = end_chunk;
             }
             end_chunk = end_chunk->next;
             end_pos = 0;
         }
+        // Removes an element from the back end of the queue. In other
+        // words, it rolls back the last push to the queue. Take care: the
+        // caller is responsible for destroying the object being unpushed.
+        // The caller must also guarantee that the queue isn't empty when
+        // unpush is called. This cannot be checked automatically, as the
+        // read side of the queue can be managed by a different, completely
+        // unsynchronised thread.
+        inline void unpush ()
+        {
+            // First, move 'back' one position backwards.
+            if (back_pos)
+                --back_pos;
+            else {
+                back_pos = N - 1;
+                back_chunk = back_chunk->prev;
+            }
+
+            // Now, move 'end' one position backwards. Note that the
+            // obsolete end chunk is not kept as the spare chunk: analysis
+            // shows that doing so would require a free plus an atomic
+            // operation per deallocated chunk, rather than a simple free.
+            if (end_pos)
+                --end_pos;
+            else {
+                end_pos = N - 1;
+                end_chunk = end_chunk->prev;
+                free (end_chunk->next);
+                end_chunk->next = NULL;
+            }
+        }
+
         // Removes an element from the front end of the queue.
         inline void pop ()
         {
             if (++ begin_pos == N) {
                 chunk_t *o = begin_chunk;
                 begin_chunk = begin_chunk->next;
+                begin_chunk->prev = NULL;
                 begin_pos = 0;
                 // 'o' has been more recently used than spare_chunk,
                 // so for cache reasons we'll get rid of the spare and
                 // use 'o' as the spare.
                 chunk_t *cs = spare_chunk.xchg (o);
                 if (cs)
                     free (cs);
             }
         }
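As a side note for readers of this diff: below is a minimal, single-threaded sketch of the rollback mechanics the hunk above introduces. Everything here is illustrative, not zeromq code: the toy_queue name is invented, plain assert stands in for zmq_assert, and the lock-free spare-chunk recycling is omitted. The point is just how the new prev pointers let unpush () walk the 'back' and 'end' cursors backwards across a chunk boundary and free the now-empty end chunk.

    #include <cassert>
    #include <cstdlib>

    // Illustration only: same chunk/cursor shape as yqueue, minus the
    // spare-chunk machinery. N is tiny so unpush () crosses a chunk
    // boundary after just a few pushes.
    template <typename T, int N> class toy_queue
    {
    public:
        toy_queue ()
        {
            begin_chunk = back_chunk = end_chunk =
                (chunk_t*) malloc (sizeof (chunk_t));
            assert (begin_chunk);
            begin_chunk->prev = begin_chunk->next = NULL;
            begin_pos = back_pos = end_pos = 0;
        }

        ~toy_queue ()
        {
            while (begin_chunk) {
                chunk_t *o = begin_chunk;
                begin_chunk = begin_chunk->next;
                free (o);
            }
        }

        // Slot most recently written by push ().
        T &back () { return back_chunk->values [back_pos]; }

        void push ()
        {
            back_chunk = end_chunk;
            back_pos = end_pos;
            if (++end_pos != N)
                return;
            end_chunk->next = (chunk_t*) malloc (sizeof (chunk_t));
            assert (end_chunk->next);
            end_chunk->next->prev = end_chunk;   // the back-link this commit adds
            end_chunk = end_chunk->next;
            end_chunk->next = NULL;
            end_pos = 0;
        }

        // Mirrors the new unpush (): step 'back' one slot backwards, then
        // 'end'; when 'end' leaves a chunk, that chunk is simply freed.
        void unpush ()
        {
            if (back_pos)
                --back_pos;
            else {
                back_pos = N - 1;
                back_chunk = back_chunk->prev;   // needs prev to cross chunks
            }
            if (end_pos)
                --end_pos;
            else {
                end_pos = N - 1;
                end_chunk = end_chunk->prev;
                free (end_chunk->next);
                end_chunk->next = NULL;
            }
        }

    private:
        struct chunk_t
        {
            T values [N];
            chunk_t *prev;
            chunk_t *next;
        };

        chunk_t *begin_chunk, *back_chunk, *end_chunk;
        int begin_pos, back_pos, end_pos;
    };

    int main ()
    {
        toy_queue<int, 2> q;
        q.push (); q.back () = 1;
        q.push (); q.back () = 2;   // fills chunk 0; a second chunk is malloc'd
        q.unpush ();                // rolls back '2'; the empty second chunk is freed
        assert (q.back () == 1);
        return 0;
    }

The real queue differs in one important way: where this sketch always calls malloc/free at chunk boundaries, yqueue first tries to recycle the spare chunk described at the end of this diff.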
@@ -133,6 +167,7 @@ namespace zmq
         struct chunk_t
         {
             T values [N];
+            chunk_t *prev;
             chunk_t *next;
         };
@@ -149,7 +184,7 @@ namespace zmq
         // People are likely to produce and consume at similar rates. In
         // this scenario holding onto the most recently freed chunk saves
-        // us from having to call new/delete.
+        // us from having to call malloc/free.
         atomic_ptr_t<chunk_t> spare_chunk;

         // Disable copying of yqueue.
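For readers who want to see that spare-chunk recycling in isolation, here is a sketch of the pattern. atomic_ptr_t is zeromq's own pre-C++11 atomic pointer wrapper; in this sketch std::atomic<chunk_t*>::exchange stands in for its xchg (), and N as well as the function names obtain_chunk / retire_chunk are invented for illustration.

    #include <atomic>
    #include <cstdlib>

    enum { N = 16 };                 // illustrative chunk granularity
    struct chunk_t
    {
        int values [N];
        chunk_t *prev;
        chunk_t *next;
    };

    // One pointer's worth of cache: the most recently retired chunk.
    static std::atomic<chunk_t*> spare_chunk (nullptr);

    // Reader side (pop): instead of freeing the chunk it just drained,
    // stash it as the spare and free whichever chunk it displaced.
    void retire_chunk (chunk_t *o)
    {
        chunk_t *cs = spare_chunk.exchange (o);
        if (cs)
            free (cs);
    }

    // Writer side (push): grab the spare if there is one; fall back to
    // malloc only when the reader hasn't retired a chunk recently.
    chunk_t *obtain_chunk ()
    {
        chunk_t *sc = spare_chunk.exchange (nullptr);
        if (sc)
            return sc;
        return (chunk_t*) malloc (sizeof (chunk_t));
    }

With one producer and one consumer thread, as yqueue assumes, each boundary crossing in the steady state costs two atomic exchanges instead of a malloc plus a free; whichever chunk loses the swap is freed exactly once, so nothing leaks and nothing is double-freed.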