Skip to content

Commit e9e345f

Browse files
Spinlock: move definitions from kernel headers to runtime headers
This will allow spinlock code to be compiled for both kernel and non-kernel usage without having to use `#ifdef KERNEL` directives.
1 parent d449207 commit e9e345f

File tree

4 files changed

+194
-196
lines changed

4 files changed

+194
-196
lines changed

src/kernel/kernel.h

Lines changed: 0 additions & 163 deletions
Original file line numberDiff line numberDiff line change
@@ -75,169 +75,6 @@ typedef struct sched_task {
7575

7676
extern vector cpuinfos;
7777

78-
#if defined(KERNEL) && defined(SMP_ENABLE)
79-
static inline boolean spin_try(spinlock l)
80-
{
81-
boolean success = compare_and_swap_64(&l->w, 0, 1);
82-
#ifdef LOCK_STATS
83-
LOCKSTATS_RECORD_LOCK(l->s, success, 0, 0);
84-
#endif
85-
return success;
86-
}
87-
88-
static inline void spin_lock(spinlock l)
89-
{
90-
volatile u64 *p = (volatile u64 *)&l->w;
91-
#ifdef LOCK_STATS
92-
u64 spins = 0;
93-
while (*p || !compare_and_swap_64(&l->w, 0, 1)) {
94-
spins++;
95-
kern_pause();
96-
}
97-
LOCKSTATS_RECORD_LOCK(l->s, true, spins, 0);
98-
#else
99-
while (*p || !compare_and_swap_64(&l->w, 0, 1))
100-
kern_pause();
101-
#endif
102-
}
103-
104-
static inline void spin_unlock(spinlock l)
105-
{
106-
#ifdef LOCK_STATS
107-
LOCKSTATS_RECORD_UNLOCK(l->s);
108-
#endif
109-
compiler_barrier();
110-
*(volatile u64 *)&l->w = 0;
111-
}
112-
113-
static inline boolean spin_tryrlock(rw_spinlock l)
114-
{
115-
if (*(volatile word *)&l->l.w)
116-
return false;
117-
fetch_and_add(&l->readers, 1);
118-
if (!*(volatile word *)&l->l.w)
119-
return true;
120-
fetch_and_add(&l->readers, -1);
121-
return false;
122-
}
123-
124-
static inline void spin_rlock(rw_spinlock l)
125-
{
126-
while (1) {
127-
if (*(volatile word *)&l->l.w) {
128-
kern_pause();
129-
continue;
130-
}
131-
fetch_and_add(&l->readers, 1);
132-
if (!*(volatile word *)&l->l.w)
133-
return;
134-
fetch_and_add(&l->readers, -1);
135-
}
136-
}
137-
138-
static inline void spin_runlock(rw_spinlock l)
139-
{
140-
fetch_and_add(&l->readers, -1);
141-
}
142-
143-
static inline boolean spin_trywlock(rw_spinlock l)
144-
{
145-
if (*(volatile word *)&l->readers || !spin_try(&l->l))
146-
return false;
147-
if (!*(volatile word *)&l->readers)
148-
return true;
149-
spin_unlock(&l->l);
150-
return false;
151-
}
152-
153-
static inline void spin_wlock(rw_spinlock l)
154-
{
155-
spin_lock(&l->l);
156-
while (*(volatile word *)&l->readers)
157-
kern_pause();
158-
}
159-
160-
static inline void spin_wunlock(rw_spinlock l)
161-
{
162-
spin_unlock(&l->l);
163-
}
164-
#else
165-
#ifdef SPIN_LOCK_DEBUG_NOSMP
166-
u64 get_program_counter(void);
167-
168-
static inline boolean spin_try(spinlock l)
169-
{
170-
if (l->w)
171-
return false;
172-
l->w = get_program_counter();
173-
return true;
174-
}
175-
176-
static inline void spin_lock(spinlock l)
177-
{
178-
if (l->w != 0) {
179-
print_frame_trace_from_here();
180-
halt("spin_lock: lock %p already locked by 0x%lx\n", l, l->w);
181-
}
182-
l->w = get_program_counter();
183-
}
184-
185-
static inline void spin_unlock(spinlock l)
186-
{
187-
assert(l->w != 1);
188-
l->w = 0;
189-
}
190-
191-
static inline boolean spin_tryrlock(rw_spinlock l)
192-
{
193-
if (l->l.w)
194-
return false;
195-
l->readers++;
196-
return true;
197-
}
198-
199-
static inline void spin_rlock(rw_spinlock l) {
200-
assert(l->l.w == 0);
201-
assert(l->readers == 0);
202-
l->readers++;
203-
}
204-
205-
static inline void spin_runlock(rw_spinlock l) {
206-
assert(l->readers == 1);
207-
assert(l->l.w == 0);
208-
l->readers--;
209-
}
210-
211-
static inline boolean spin_trywlock(rw_spinlock l)
212-
{
213-
if (l->readers || l->l.w)
214-
return false;
215-
assert(spin_try(&l->l));
216-
return true;
217-
}
218-
219-
static inline void spin_wlock(rw_spinlock l) {
220-
assert(l->readers == 0);
221-
spin_lock(&l->l);
222-
}
223-
224-
static inline void spin_wunlock(rw_spinlock l) {
225-
assert(l->readers == 0);
226-
spin_unlock(&l->l);
227-
}
228-
#else
229-
#define spin_try(x) (true)
230-
#define spin_lock(x) ((void)x)
231-
#define spin_unlock(x) ((void)x)
232-
#define spin_trywlock(x) (true)
233-
#define spin_wlock(x) ((void)x)
234-
#define spin_wunlock(x) ((void)x)
235-
#define spin_tryrlock(x) (true)
236-
#define spin_rlock(x) ((void)x)
237-
#define spin_runlock(x) ((void)x)
238-
#endif
239-
#endif
240-
24178
#ifdef KERNEL
24279
typedef struct sched_queue {
24380
pqueue q;

src/kernel/lock.h

Lines changed: 0 additions & 31 deletions
This file was deleted.

src/runtime/lock.h

Lines changed: 194 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,194 @@
#ifdef LOCK_STATS
#include <lockstats_struct.h>
#endif

/* Basic spinlock: a single machine word, 0 when free.
 * The SMP implementation stores 1 when held; the SPIN_LOCK_DEBUG_NOSMP
 * build stores the holder's program counter instead (see spin_lock
 * variants below). */
typedef struct spinlock {
    word w;
#ifdef LOCK_STATS
    struct lockstats_lock s;    /* per-lock statistics (LOCK_STATS builds only) */
#endif
} *spinlock;

/* Reader-writer spinlock: writer exclusion via the embedded spinlock,
 * plus a count of currently active readers. */
typedef struct rw_spinlock {
    struct spinlock l;
    word readers;
} *rw_spinlock;
16+
17+
/* Initialize a spinlock to the unlocked state (and reset its
 * statistics record in LOCK_STATS builds). Must be called before
 * first use; not safe to call on a lock that may be held. */
static inline void spin_lock_init(spinlock l)
{
    l->w = 0;
#ifdef LOCK_STATS
    l->s.type = LOCK_TYPE_SPIN;
    l->s.acq_time = 0;
    l->s.trace_hash = 0;
#endif
}
26+
27+
/* Initialize a reader-writer spinlock: writer lock free, zero readers. */
static inline void spin_rw_lock_init(rw_spinlock l)
{
    spin_lock_init(&l->l);
    l->readers = 0;
}
32+
33+
#if defined(KERNEL) && defined(SMP_ENABLE)
/* Attempt to acquire the spinlock without spinning.
 * Returns true on success, false if the lock is already held.
 * Acquisition is a 0 -> 1 compare-and-swap on l->w.
 * NOTE(review): assumes compare_and_swap_64 carries the acquire
 * semantics needed here -- confirm per supported architecture. */
static inline boolean spin_try(spinlock l)
{
    boolean success = compare_and_swap_64(&l->w, 0, 1);
#ifdef LOCK_STATS
    LOCKSTATS_RECORD_LOCK(l->s, success, 0, 0);
#endif
    return success;
}

/* Spin until the lock is acquired.
 * Test-and-test-and-set: spin on a plain volatile read of l->w and
 * only attempt the CAS once the lock looks free, limiting cache-line
 * traffic among waiters. kern_pause() is issued between polls. */
static inline void spin_lock(spinlock l)
{
    volatile u64 *p = (volatile u64 *)&l->w;
#ifdef LOCK_STATS
    u64 spins = 0;
    while (*p || !compare_and_swap_64(&l->w, 0, 1)) {
        spins++;
        kern_pause();
    }
    LOCKSTATS_RECORD_LOCK(l->s, true, spins, 0);
#else
    while (*p || !compare_and_swap_64(&l->w, 0, 1))
        kern_pause();
#endif
}

/* Release the lock: compiler barrier, then a volatile store of 0.
 * NOTE(review): only a compiler barrier precedes the store; this relies
 * on the target's hardware store ordering being strong enough to act as
 * a release -- confirm for each supported architecture. */
static inline void spin_unlock(spinlock l)
{
#ifdef LOCK_STATS
    LOCKSTATS_RECORD_UNLOCK(l->s);
#endif
    compiler_barrier();
    *(volatile u64 *)&l->w = 0;
}
67+
68+
/* Try to take a read lock without spinning.
 * Optimistic scheme: check for a writer, bump the reader count, then
 * re-check; back out (decrement) and fail if a writer acquired l->l
 * between the first check and the increment. */
static inline boolean spin_tryrlock(rw_spinlock l)
{
    if (*(volatile word *)&l->l.w)
        return false;
    fetch_and_add(&l->readers, 1);
    if (!*(volatile word *)&l->l.w)
        return true;
    fetch_and_add(&l->readers, -1);
    return false;
}

/* Take a read lock, spinning while a writer holds the lock.
 * Same optimistic increment/re-check protocol as spin_tryrlock,
 * retried in a loop with kern_pause() between attempts. */
static inline void spin_rlock(rw_spinlock l)
{
    while (1) {
        if (*(volatile word *)&l->l.w) {
            kern_pause();
            continue;
        }
        fetch_and_add(&l->readers, 1);
        if (!*(volatile word *)&l->l.w)
            return;
        fetch_and_add(&l->readers, -1);
    }
}

/* Drop a read lock: atomically decrement the reader count. */
static inline void spin_runlock(rw_spinlock l)
{
    fetch_and_add(&l->readers, -1);
}
97+
98+
/* Try to take the write lock without spinning.
 * Takes the underlying spinlock first (excluding other writers), then
 * verifies no readers slipped in concurrently; releases the spinlock
 * and fails if any did. */
static inline boolean spin_trywlock(rw_spinlock l)
{
    if (*(volatile word *)&l->readers || !spin_try(&l->l))
        return false;
    if (!*(volatile word *)&l->readers)
        return true;
    spin_unlock(&l->l);
    return false;
}

/* Take the write lock: acquire the writer spinlock, then wait for the
 * reader count to drain to zero. New readers cannot enter once l->l is
 * held (spin_rlock backs off while l->l.w is set). */
static inline void spin_wlock(rw_spinlock l)
{
    spin_lock(&l->l);
    while (*(volatile word *)&l->readers)
        kern_pause();
}

/* Release the write lock. */
static inline void spin_wunlock(rw_spinlock l)
{
    spin_unlock(&l->l);
}
119+
#else
#ifdef SPIN_LOCK_DEBUG_NOSMP
/* Uniprocessor debug build: no concurrent CPUs, so finding a lock
 * already held indicates a locking bug rather than contention. The
 * lock word stores the acquiring caller's program counter so the
 * holder can be identified from the halt message. */
u64 get_program_counter(void);

/* Non-atomic try-lock: fail if held, otherwise stamp the caller's PC
 * into the lock word. */
static inline boolean spin_try(spinlock l)
{
    if (l->w)
        return false;
    l->w = get_program_counter();
    return true;
}

/* Non-atomic lock: on a single CPU an already-held lock would spin
 * forever, so dump a trace and halt, reporting the holder's PC. */
static inline void spin_lock(spinlock l)
{
    if (l->w != 0) {
        print_frame_trace_from_here();
        halt("spin_lock: lock %p already locked by 0x%lx\n", l, l->w);
    }
    l->w = get_program_counter();
}

/* Release the lock.
 * NOTE(review): the assert rejects w == 1, presumably to catch a lock
 * taken via the SMP CAS path (which stores 1) being released through
 * the debug path -- confirm intent. */
static inline void spin_unlock(spinlock l)
{
    assert(l->w != 1);
    l->w = 0;
}
145+
146+
/* Debug read-lock variants: single CPU, so reader bookkeeping is plain
 * arithmetic and contention is asserted against rather than waited out. */
static inline boolean spin_tryrlock(rw_spinlock l)
{
    if (l->l.w)
        return false;
    l->readers++;
    return true;
}

/* Take a read lock; asserts the lock is entirely uncontended
 * (no writer, no existing reader). */
static inline void spin_rlock(rw_spinlock l) {
    assert(l->l.w == 0);
    assert(l->readers == 0);
    l->readers++;
}

/* Drop the read lock; asserts exactly one reader and no writer. */
static inline void spin_runlock(rw_spinlock l) {
    assert(l->readers == 1);
    assert(l->l.w == 0);
    l->readers--;
}
165+
166+
/* Debug try-wlock: fails on any reader or writer; otherwise takes the
 * inner lock via spin_try, which must succeed since we just checked.
 * NOTE(review): spin_try has a side effect inside assert(); if this
 * build ever compiles assert away (NDEBUG-style), the lock is never
 * taken -- confirm the runtime assert is unconditional. */
static inline boolean spin_trywlock(rw_spinlock l)
{
    if (l->readers || l->l.w)
        return false;
    assert(spin_try(&l->l));
    return true;
}

/* Take the write lock; asserts no readers are present. */
static inline void spin_wlock(rw_spinlock l) {
    assert(l->readers == 0);
    spin_lock(&l->l);
}

/* Release the write lock; asserts no readers appeared meanwhile. */
static inline void spin_wunlock(rw_spinlock l) {
    assert(l->readers == 0);
    spin_unlock(&l->l);
}
183+
#else
/* Neither SMP nor lock debugging: all lock operations compile away to
 * no-ops and try-variants always succeed -- presumably safe because a
 * non-SMP build has no concurrent lock holders (NOTE(review): confirm
 * no preemption can occur inside critical sections in this config). */
#define spin_try(x) (true)
#define spin_lock(x) ((void)x)
#define spin_unlock(x) ((void)x)
#define spin_trywlock(x) (true)
#define spin_wlock(x) ((void)x)
#define spin_wunlock(x) ((void)x)
#define spin_tryrlock(x) (true)
#define spin_rlock(x) ((void)x)
#define spin_runlock(x) ((void)x)
#endif
#endif

0 commit comments

Comments
 (0)