#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
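/*
 * (The individual ticket fields are __ticket_t values from
 * asm/spinlock_types.h - 8 or 16 bits wide depending on CONFIG_NR_CPUS -
 * and the 16-bit case is where the 2^16 CPU limit above comes from.)
 */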

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)
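/*
 * SPIN_THRESHOLD is consumed by arch_spin_lock() below: a waiter spins up
 * to this many cpu_relax() iterations per round before calling
 * __ticket_lock_spinning(), which (with paravirt ticketlocks) may block
 * the virtual CPU instead of burning cycles.
 */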

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

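/*
 * Called by a waiter that is about to block: set TICKET_SLOWPATH_FLAG
 * (bit 0 of the tail) so that arch_spin_unlock() knows it must kick a
 * blocked waiter rather than just bumping the head.
 */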
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
        set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
                                                   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
                                        __ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.tickets.head == lock.tickets.tail;
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part. (An illustrative sketch of this
 * scheme follows arch_spin_lock() below.)
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

        inc = xadd(&lock->tickets, inc);
        if (likely(inc.head == inc.tail))
                goto out;

        inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;

                do {
                        if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
                                goto out;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
out:    barrier();      /* make sure nothing creeps before the lock is taken */
}
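/*
 * Illustrative sketch only (not used by the kernel): the scheme described
 * in the comment above, rewritten with GCC's generic atomic builtins on a
 * bare 32-bit word, assuming 16-bit tickets and ignoring the paravirt
 * slowpath flag. The names (example_ticket_lock, lock_word) are made up
 * for the example; the real layout and TICKET_SHIFT come from
 * asm/spinlock_types.h.
 */
static inline void example_ticket_lock(unsigned int *lock_word)
{
        /*
         * One wide fetch-and-add, like the xadd() above: the tail lives
         * in the high 16 bits, so adding 1 << 16 takes a ticket and
         * returns the old head/tail pair in a single atomic operation.
         */
        unsigned int old = __atomic_fetch_add(lock_word, 1U << 16,
                                              __ATOMIC_ACQUIRE);
        unsigned short my_ticket = old >> 16;   /* old tail == our ticket */

        /* Spin until the head (low 16 bits) reaches our ticket. */
        while ((unsigned short)__atomic_load_n(lock_word, __ATOMIC_ACQUIRE)
               != my_ticket)
                cpu_relax();
}
/*
 * Unlock in this sketch is just an increment of the head half, which is
 * what the real arch_spin_unlock() below does via add_smp()/__add().
 */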

/*
 * Try to take the lock in a single shot: snapshot head/tail, and only if
 * the lock looks free (ignoring the paravirt slowpath flag) cmpxchg in a
 * tail bumped by TICKET_LOCK_INC. Never spins.
 */
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = ACCESS_ONCE(lock->tickets);
        if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                return 0;

        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

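/*
 * Slow unlock path, used when TICKET_SLOWPATH_FLAG was found set after
 * releasing the lock: replay the head increment on the pre-unlock
 * snapshot, try to clear the flag if the lock is now uncontended, and
 * otherwise kick the next queued waiter.
 */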
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
                                            arch_spinlock_t old)
{
        arch_spinlock_t new;

        BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

        /* Perform the unlock on the "before" copy */
        old.tickets.head += TICKET_LOCK_INC;

        /* Clear the slowpath flag */
        new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

        /*
         * If the lock is uncontended, clear the flag - use cmpxchg in
         * case it changes behind our back though.
         */
        if (new.tickets.head != new.tickets.tail ||
            cmpxchg(&lock->head_tail, old.head_tail,
                    new.head_tail) != old.head_tail) {
                /*
                 * Lock still has someone queued for it, so wake up an
                 * appropriate waiter.
                 */
                __ticket_unlock_kick(lock, old.tickets.head);
        }
}

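/*
 * Release the lock by advancing the head. With paravirt ticketlocks
 * enabled, snapshot the lock first so that, if a blocked waiter has set
 * TICKET_SLOWPATH_FLAG, __ticket_unlock_slowpath() can kick it.
 */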
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        if (TICKET_SLOWPATH_FLAG &&
            static_key_false(&paravirt_ticketlocks_enabled)) {
                arch_spinlock_t prev;

                prev = *lock;
                add_smp(&lock->tickets.head, TICKET_LOCK_INC);

                /* add_smp() is a full mb() */

                if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
                        __ticket_unlock_slowpath(lock, prev);
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                 unsigned long flags)
{
        arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                cpu_relax();
}

#ifndef CONFIG_QUEUE_RWLOCK
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
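/*
 * Illustrative sketch only (not used by the kernel): the biased-counter
 * scheme described above, written with GCC's generic atomic builtins.
 * The names and the bias value are made up for the example; the real
 * constants (RW_LOCK_BIAS, WRITE_LOCK_CMP, ...) come from asm/rwlock.h.
 * The counter starts out at the bias value when the lock is free.
 */
#define EXAMPLE_RW_BIAS (1 << 20)       /* stand-in for RW_LOCK_BIAS */

static inline int example_read_trylock(int *counter)
{
        /* Each reader takes one unit; a negative result means a writer is in. */
        if (__atomic_sub_fetch(counter, 1, __ATOMIC_ACQUIRE) >= 0)
                return 1;
        __atomic_add_fetch(counter, 1, __ATOMIC_RELEASE);       /* back out */
        return 0;
}

static inline int example_write_trylock(int *counter)
{
        /* A writer needs the whole bias: no readers and no other writer. */
        if (__atomic_sub_fetch(counter, EXAMPLE_RW_BIAS, __ATOMIC_ACQUIRE) == 0)
                return 1;
        __atomic_add_fetch(counter, EXAMPLE_RW_BIAS, __ATOMIC_RELEASE); /* back out */
        return 0;
}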

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
        return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
        return lock->write == WRITE_LOCK_CMP;
}

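/*
 * Reader fast path: atomically decrement the counter; if it goes negative
 * a writer holds the lock, so fall back to the out-of-line
 * __read_lock_failed() helper.
 */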
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
                     "jns 1f\n"
                     "call __read_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (rw) : "memory");
}

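/*
 * Writer fast path: atomically subtract RW_LOCK_BIAS; only a result of
 * exactly zero (no readers, no other writer) means we own the lock,
 * otherwise fall back to the out-of-line __write_lock_failed() helper.
 */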
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
                     "jz 1f\n"
                     "call __write_lock_failed\n\t"
                     "1:\n"
                     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
                     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

        if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
                return 1;
        READ_LOCK_ATOMIC(inc)(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)&lock->write;

        if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
                return 1;
        atomic_add(WRITE_LOCK_CMP, count);
        return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
                     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
                     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
#else
#include <asm/qrwlock.h>
#endif /* CONFIG_QUEUE_RWLOCK */

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */