/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
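
/*
 * Extent buffer locks have two modes:
 *
 *   spinning -- the underlying rwlock (eb->lock) is held; the holder is
 *               counted in spinning_readers/spinning_writers.
 *   blocking -- the rwlock has been dropped, but the holder is still
 *               counted in blocking_readers/blocking_writers, and
 *               contending lockers sleep on read_lock_wq/write_lock_wq
 *               until those counters drain back to zero.
 *
 * An illustrative caller pattern (not taken verbatim from this file):
 *
 *   btrfs_tree_lock(eb);                 spinning write lock
 *   btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *   ... do work that may schedule() ...
 *   btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *   btrfs_tree_unlock(eb);
 */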
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the rwlock.
 */
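/*
 * Dropping eb->lock here is safe because the raised
 * blocking_writers/blocking_readers count keeps the buffer logically
 * locked: every other locker rechecks those counters and sleeps on the
 * waitqueues until they drain back to zero.
 */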
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}

/*
 * if we currently have a blocking lock, retake the rwlock
 * and drop our blocking count
 */
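/*
 * The bare waitqueue_active() checks below need no explicit barrier:
 * atomic_dec_and_test() implies a full memory barrier, ordering the
 * counter update before the waitqueue check.
 */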
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
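/*
 * Taking eb->lock only excludes other spinning holders; a writer that
 * went blocking has already dropped the rwlock, so blocking_writers
 * must be rechecked after read_lock() and the rwlock released again
 * while we wait.
 */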
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
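/*
 * blocking_writers is tested both before and after the trylock: the
 * first test skips the atomic rwlock operation entirely when a blocking
 * writer exists, and the second closes the window against a writer that
 * went blocking (dropping eb->lock) in between.
 */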
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
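/*
 * Same check-trylock-recheck pattern as btrfs_try_tree_read_lock(),
 * except that a would-be writer must also back off when blocking
 * readers exist.
 */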
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        if (!write_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
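/*
 * A blocking read lock does not hold eb->lock, so there is nothing to
 * read_unlock() here: we only drop our blocking_readers count and wake
 * anyone (writers in btrfs_tree_lock()) waiting for it to reach zero.
 */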
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
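/*
 * The unlocked wait_event()s are only an optimization: a reader or
 * writer can still go blocking after we sample the counters, so both
 * counters must be rechecked under eb->lock and the whole sequence
 * retried if either is set.
 */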
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
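/*
 * When the lock is in blocking mode, eb->lock is not held and only
 * blocking_writers pins the buffer. The explicit smp_mb() orders the
 * blocking_writers decrement before the waitqueue_active() test so a
 * concurrent waiter cannot be missed.
 */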
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                smp_mb();
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}