Commit f87de76

marcanx42 authored and committed
RCU: Fix race condition between writer/reader
This uses an atomic counter and spins only on the writer side, which preserves realtime behavior on the reader side. The spinning yields (using the same Boost function that Boost's spinlocks use) to stay scheduler-friendly.

Fixing this bug also lets us confidently drop garbage early in the writer when appropriate, so do that and avoid keeping dead wood where possible.

This reverts commit f95439a: "add spinlock to RCU manager to protect concurrent reader() and update() calls"
1 parent 55c2c9d commit f87de76
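
For reference, here is a minimal standalone sketch of the scheme this commit adopts: a reader count that only the writer ever waits on. It uses standard C++ atomics, std::shared_ptr and std::this_thread::yield() instead of the glib atomics, boost::shared_ptr and boost::detail::yield() used by the actual patch, and the names (SimpleRCU, install) are illustrative only, not part of libpbd:

#include <atomic>
#include <memory>
#include <thread>

template <class T>
class SimpleRCU
{
  public:
	explicit SimpleRCU (T* v)
		: _current (new std::shared_ptr<T> (v))
		, _active_reads (0)
	{}

	~SimpleRCU () { delete _current.load (); }

	/* Reader side: realtime-safe. Just an atomic increment, a shared_ptr
	 * copy and an atomic decrement - no lock, no spinning.
	 */
	std::shared_ptr<T> reader ()
	{
		_active_reads.fetch_add (1);
		std::shared_ptr<T> rv = *_current.load ();
		_active_reads.fetch_sub (1);
		return rv;
	}

	/* Writer side: publish the new value, then spin (yielding to the
	 * scheduler) until no reader is still mid-copy of the old shared_ptr,
	 * and only then delete it. Any reader that already finished its copy
	 * holds its own reference, so the underlying T stays alive as long
	 * as that reader needs it.
	 */
	void install (T* v)
	{
		std::shared_ptr<T>* fresh = new std::shared_ptr<T> (v);
		std::shared_ptr<T>* old   = _current.exchange (fresh);

		while (_active_reads.load () != 0) {
			std::this_thread::yield ();
		}

		delete old;
	}

  private:
	std::atomic<std::shared_ptr<T>*> _current;
	std::atomic<int>                 _active_reads;
};

As in the patch, readers never block; only the writer waits, and once no reader can still be copying the old shared_ptr it is safe to drop it.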


libs/pbd/pbd/rcu.h

Lines changed: 32 additions & 34 deletions
@@ -21,12 +21,12 @@
 #define __pbd_rcu_h__
 
 #include "boost/shared_ptr.hpp"
+#include "boost/smart_ptr/detail/yield_k.hpp"
 #include "glibmm/threads.h"
 
 #include <list>
 
 #include "pbd/libpbd_visibility.h"
-#include "pbd/spinlock.h"
 
 /** @file rcu.h
  * Define a set of classes to implement Read-Copy-Update. We do not attempt to define RCU here - use google.
@@ -52,30 +52,22 @@ class /*LIBPBD_API*/ RCUManager
 {
   public:
 
-	RCUManager (T* new_rcu_value) {
+	RCUManager (T* new_rcu_value) : active_reads(0) {
 		x.m_rcu_value = new boost::shared_ptr<T> (new_rcu_value);
 	}
 
 	virtual ~RCUManager() { delete x.m_rcu_value; }
 
 	boost::shared_ptr<T> reader () const {
 		boost::shared_ptr<T> rv;
-		{
-			/* we take and hold this lock while setting up rv
-			   (notably increasing the reference count shared by
-			   all shared_ptr<T> that reference the same object as
-			   m_rcu_value. This prevents and update() call from
-			   deleting the shared_ptr<T> while we do this.
-
-			   The atomic pointer fetch only ensures that we can
-			   atomically read the ptr-to-shared-ptr. It does not
-			   protect the internal structure of the shared_ptr<T>
-			   which could otherwise be deleted by update() while
-			   we use it.
-			*/
-			PBD::SpinLock sl (_spinlock);
-			rv = *((boost::shared_ptr<T> *) g_atomic_pointer_get (&x.gptr));
-		}
+
+		// Keep count of any readers in this section of code, so writers can
+		// wait until m_rcu_value is no longer in use after an atomic exchange
+		// before dropping it.
+		g_atomic_int_inc(&active_reads);
+		rv = *((boost::shared_ptr<T> *) g_atomic_pointer_get (&x.gptr));
+		g_atomic_int_dec_and_test(&active_reads);
+
 		return rv;
 	}
 
@@ -102,7 +94,7 @@ class /*LIBPBD_API*/ RCUManager
 		mutable volatile gpointer gptr;
 	} x;
 
-	mutable PBD::spinlock_t _spinlock;
+	mutable volatile gint active_reads;
 };
 
 
@@ -194,25 +186,31 @@ class /*LIBPBD_API*/ SerializedRCUManager : public RCUManager<T>
 
 		if (ret) {
 
-			// successful update : put the old value into dead_wood,
+			// successful update
 
-			m_dead_wood.push_back (*current_write_old);
+			// wait until there are no active readers. This ensures that any
+			// references to the old value have been fully copied into a new
+			// shared_ptr, and thus have had their reference count incremented.
 
-			/* now delete it - this gets rid of the shared_ptr<T> but
-			 * because dead_wood contains another shared_ptr<T> that
-			 * references the same T, the underlying object lives
-			 * on.
-			 *
-			 * We still need to use the spinlock to ensure that a
-			 * call to reader() that is in the middle of increasing
-			 * the reference count to the underlying T from
-			 * operating on a corrupted shared_ptr<T>
-			 */
+			for (unsigned i = 0; g_atomic_int_get(&(RCUManager<T>::active_reads)) != 0; ++i) {
+				// spin being nice to the scheduler/CPU
+				boost::detail::yield(i);
+			}
+
+			// if we are not the only user, put the old value into dead_wood.
+			// if we are the only user, then it is safe to drop it here.
 
-			{
-				PBD::SpinLock sl (RCUManager<T>::_spinlock);
-				delete current_write_old;
+			if (!current_write_old->unique()) {
+				m_dead_wood.push_back (*current_write_old);
 			}
+
+			// now delete it - if we are the only user, this deletes the
+			// underlying object. if other users existed, then there will
+			// be an extra reference in m_dead_wood, ensuring that the
+			// underlying object lives on even when the other users
+			// are done with it
+
+			delete current_write_old;
 		}
 
 		/* unlock, allowing other writers to proceed */
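
A rough usage sketch, assuming the usual SerializedRCUManager write_copy()/update() pair on the writer side (those calls are not part of this diff, and the Thing/ThingList types and function names below are made up for illustration):

#include "pbd/rcu.h"

#include <list>

struct Thing { int value; };
typedef std::list<boost::shared_ptr<Thing> > ThingList;

SerializedRCUManager<ThingList> things (new ThingList);

/* realtime thread: reader() is just an atomic increment, a shared_ptr copy
 * and an atomic decrement, so it never blocks */
void
process ()
{
	boost::shared_ptr<ThingList> tl = things.reader ();
	/* iterate over *tl; the old list cannot be deleted out from under us */
}

/* non-realtime thread: copy, modify, publish. update() is where the new
 * spin-and-yield loop waits for in-flight readers before dropping the old
 * value (or parking it in dead_wood if it is still referenced) */
void
add_thing (boost::shared_ptr<Thing> t)
{
	boost::shared_ptr<ThingList> copy = things.write_copy ();
	copy->push_back (t);
	things.update (copy);
}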
