@@ -21,12 +21,12 @@
 #define __pbd_rcu_h__
 
 #include "boost/shared_ptr.hpp"
+#include "boost/smart_ptr/detail/yield_k.hpp"
 #include "glibmm/threads.h"
 
 #include <list>
 
 #include "pbd/libpbd_visibility.h"
-#include "pbd/spinlock.h"
 
 /** @file rcu.h
 * Define a set of classes to implement Read-Copy-Update. We do not attempt to define RCU here - use google.
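The newly added yield_k.hpp header provides boost::detail::yield(unsigned k), which the writer-side wait loop in the last hunk below calls while waiting for readers to drain. As a rough illustration only (a simplified, POSIX-only sketch; the real Boost implementation is platform-specific and its thresholds differ), the staged backoff it performs looks something like this:

    #include <sched.h>
    #include <time.h>

    // Simplified sketch of the boost::detail::yield(k) idea: busy-wait for
    // the first few calls, then give up the timeslice, then sleep so that a
    // long wait does not burn a whole core.
    inline void yield_sketch (unsigned k)
    {
        if (k < 16) {
            // short waits: do nothing, just spin
        } else if (k < 32) {
            sched_yield ();
        } else {
            struct timespec ts = { 0, 1000 * 1000 }; // 1 ms
            nanosleep (&ts, 0);
        }
    }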
@@ -52,30 +52,22 @@ class /*LIBPBD_API*/ RCUManager
 {
   public:
 
-	RCUManager (T* new_rcu_value) {
+	RCUManager (T* new_rcu_value) : active_reads (0) {
 		x.m_rcu_value = new boost::shared_ptr<T> (new_rcu_value);
 	}
 
 	virtual ~RCUManager () { delete x.m_rcu_value; }
 
 	boost::shared_ptr<T> reader () const {
 		boost::shared_ptr<T> rv;
-		{
-			/* we take and hold this lock while setting up rv
-			   (notably increasing the reference count shared by
-			   all shared_ptr<T> that reference the same object as
-			   m_rcu_value). This prevents an update() call from
-			   deleting the shared_ptr<T> while we do this.
-
-			   The atomic pointer fetch only ensures that we can
-			   atomically read the ptr-to-shared-ptr. It does not
-			   protect the internal structure of the shared_ptr<T>,
-			   which could otherwise be deleted by update() while
-			   we use it.
-			*/
-			PBD::SpinLock sl (_spinlock);
-			rv = *((boost::shared_ptr<T>*) g_atomic_pointer_get (&x.gptr));
-		}
+
+		// Keep count of any readers in this section of code, so writers can
+		// wait until m_rcu_value is no longer in use after an atomic exchange
+		// before dropping it.
+		g_atomic_int_inc (&active_reads);
+		rv = *((boost::shared_ptr<T>*) g_atomic_pointer_get (&x.gptr));
+		g_atomic_int_dec_and_test (&active_reads);
+
 		return rv;
 	}
 
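For orientation, here is a self-contained analogue of the handshake the new reader() participates in, written against C++11 std::atomic rather than the glib atomics used above (an illustrative sketch, not PBD code; CounterRCU and its members are invented names):

    #include <atomic>
    #include <memory>
    #include <thread>

    template <typename T>
    class CounterRCU
    {
    public:
        CounterRCU (T* v) : active_reads (0), current (new std::shared_ptr<T> (v)) {}
        ~CounterRCU () { delete current.load (); }

        std::shared_ptr<T> reader () {
            // announce ourselves before touching the shared_ptr, exactly as
            // reader() above does with g_atomic_int_inc()
            ++active_reads;
            std::shared_ptr<T> rv = *current.load ();
            --active_reads;
            return rv;
        }

        void update (T* v) {
            std::shared_ptr<T>* fresh = new std::shared_ptr<T> (v);
            std::shared_ptr<T>* stale = current.exchange (fresh);
            // wait for in-flight readers before deleting the stale
            // shared_ptr, as the writer-side hunk below does
            while (active_reads.load () != 0) {
                std::this_thread::yield ();
            }
            delete stale;
        }

    private:
        std::atomic<int> active_reads;
        std::atomic<std::shared_ptr<T>*> current;
    };

The ordering is what makes this safe: a reader increments the counter before fetching the pointer, so a writer that has swapped in a new value and then observes a zero count knows no reader can still be mid-copy on the old shared_ptr. The glib g_atomic_* calls used in the real code act as full memory barriers, which provides the same visibility guarantee.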
@@ -102,7 +94,7 @@ class /*LIBPBD_API*/ RCUManager
 		mutable volatile gpointer gptr;
 	} x;
 
-	mutable PBD::spinlock_t _spinlock;
+	mutable volatile gint active_reads;
 };
 
@@ -194,25 +186,31 @@ class /*LIBPBD_API*/ SerializedRCUManager : public RCUManager<T>
 
 	if (ret) {
 
-		// successful update : put the old value into dead_wood,
+		// successful update
 
-		m_dead_wood.push_back (*current_write_old);
+		// wait until there are no active readers. This ensures that any
+		// references to the old value have been fully copied into a new
+		// shared_ptr, and thus have had their reference count incremented.
 
-		/* now delete it - this gets rid of the shared_ptr<T> but
-		 * because dead_wood contains another shared_ptr<T> that
-		 * references the same T, the underlying object lives
-		 * on.
-		 *
-		 * We still need to use the spinlock to prevent a
-		 * call to reader() that is in the middle of increasing
-		 * the reference count on the underlying T from
-		 * operating on a corrupted shared_ptr<T>
-		 */
+		for (unsigned i = 0; g_atomic_int_get (&(RCUManager<T>::active_reads)) != 0; ++i) {
+			// spin, being nice to the scheduler/CPU
+			boost::detail::yield (i);
+		}
+
+		// if we are not the only user, put the old value into dead_wood.
+		// if we are the only user, then it is safe to drop it here.
 
-		{
-			PBD::SpinLock sl (RCUManager<T>::_spinlock);
-			delete current_write_old;
+		if (!current_write_old->unique ()) {
+			m_dead_wood.push_back (*current_write_old);
 		}
+
+		// now delete it - if we are the only user, this deletes the
+		// underlying object. if other users existed, then there will
+		// be an extra reference in m_dead_wood, ensuring that the
+		// underlying object lives on even when the other users
+		// are done with it
+
+		delete current_write_old;
 	}
 
 	/* unlock, allowing other writers to proceed */
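To see why update() must wait before deleting current_write_old, it helps to picture typical client code. The following sketch assumes the conventional SerializedRCUManager write_copy()/update() pair from elsewhere in this header (not shown in this diff); Thing and the thread functions are invented for illustration:

    #include "pbd/rcu.h"

    struct Thing { int value; };

    SerializedRCUManager<Thing> manager (new Thing);

    void reader_thread ()
    {
        // lock-free snapshot; it holds its own reference, so the object
        // stays alive even if a writer swaps in a replacement right now
        boost::shared_ptr<Thing> snap = manager.reader ();
        int v = snap->value;
        (void) v;
    }

    void writer_thread ()
    {
        // take the writer lock and get a private copy to mutate
        boost::shared_ptr<Thing> copy = manager.write_copy ();
        copy->value = 42;
        // atomically publish the copy; this runs the wait/dead_wood
        // logic shown in the hunk above before the old value is dropped
        manager.update (copy);
    }

The wait loop closes the window in which reader_thread has fetched the old ptr-to-shared_ptr but has not yet finished copying it into snap; without it, the writer's delete could destroy the shared_ptr mid-copy.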