LCOV - code coverage report
Current view: top level - fs/btrfs - delayed-ref.c (source / functions) Hit Total Coverage
Test: btrfstest.info Lines: 275 323 85.1 %
Date: 2014-11-28 Functions: 20 22 90.9 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (C) 2009 Oracle.  All rights reserved.
       3             :  *
       4             :  * This program is free software; you can redistribute it and/or
       5             :  * modify it under the terms of the GNU General Public
       6             :  * License v2 as published by the Free Software Foundation.
       7             :  *
       8             :  * This program is distributed in the hope that it will be useful,
       9             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      10             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      11             :  * General Public License for more details.
      12             :  *
      13             :  * You should have received a copy of the GNU General Public
      14             :  * License along with this program; if not, write to the
      15             :  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
      16             :  * Boston, MA 021110-1307, USA.
      17             :  */
      18             : 
      19             : #include <linux/sched.h>
      20             : #include <linux/slab.h>
      21             : #include <linux/sort.h>
      22             : #include "ctree.h"
      23             : #include "delayed-ref.h"
      24             : #include "transaction.h"
      25             : 
      26             : struct kmem_cache *btrfs_delayed_ref_head_cachep;
      27             : struct kmem_cache *btrfs_delayed_tree_ref_cachep;
      28             : struct kmem_cache *btrfs_delayed_data_ref_cachep;
      29             : struct kmem_cache *btrfs_delayed_extent_op_cachep;
      30             : /*
      31             :  * delayed back reference update tracking.  For subvolume trees
      32             :  * we queue up extent allocations and backref maintenance for
      33             :  * delayed processing.   This avoids deep call chains where we
      34             :  * add extents in the middle of btrfs_search_slot, and it allows
      35             :  * us to buffer up frequently modified backrefs in an rb tree instead
      36             :  * of hammering updates on the extent allocation tree.
      37             :  */
      38             : 
      39             : /*
      40             :  * compare two delayed tree backrefs with same bytenr and type
      41             :  */
      42             : static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
      43             :                           struct btrfs_delayed_tree_ref *ref1, int type)
      44             : {
      45        2545 :         if (type == BTRFS_TREE_BLOCK_REF_KEY) {
      46        2472 :                 if (ref1->root < ref2->root)
      47             :                         return -1;
      48        2472 :                 if (ref1->root > ref2->root)
      49             :                         return 1;
      50             :         } else {
      51          73 :                 if (ref1->parent < ref2->parent)
      52             :                         return -1;
      53          55 :                 if (ref1->parent > ref2->parent)
      54             :                         return 1;
      55             :         }
      56             :         return 0;
      57             : }
      58             : 
      59             : /*
      60             :  * compare two delayed data backrefs with same bytenr and type
      61             :  */
      62        6871 : static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
      63             :                           struct btrfs_delayed_data_ref *ref1)
      64             : {
      65        6871 :         if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
      66        4014 :                 if (ref1->root < ref2->root)
      67             :                         return -1;
      68        4010 :                 if (ref1->root > ref2->root)
      69             :                         return 1;
      70        3993 :                 if (ref1->objectid < ref2->objectid)
      71             :                         return -1;
      72        3992 :                 if (ref1->objectid > ref2->objectid)
      73             :                         return 1;
      74        3973 :                 if (ref1->offset < ref2->offset)
      75             :                         return -1;
      76        3961 :                 if (ref1->offset > ref2->offset)
      77             :                         return 1;
      78             :         } else {
      79        2857 :                 if (ref1->parent < ref2->parent)
      80             :                         return -1;
      81        2857 :                 if (ref1->parent > ref2->parent)
      82             :                         return 1;
      83             :         }
      84        5780 :         return 0;
      85             : }
      86             : 
      87             : /*
      88             :  * entries in the rb tree are ordered by the byte number of the extent,
      89             :  * type of the delayed backrefs and content of delayed backrefs.
      90             :  */
      91       16469 : static int comp_entry(struct btrfs_delayed_ref_node *ref2,
      92             :                       struct btrfs_delayed_ref_node *ref1,
      93             :                       bool compare_seq)
      94             : {
      95       16469 :         if (ref1->bytenr < ref2->bytenr)
      96             :                 return -1;
      97       16469 :         if (ref1->bytenr > ref2->bytenr)
      98             :                 return 1;
      99       16469 :         if (ref1->is_head && ref2->is_head)
     100             :                 return 0;
     101       16469 :         if (ref2->is_head)
     102             :                 return -1;
     103       16469 :         if (ref1->is_head)
     104             :                 return 1;
     105       16469 :         if (ref1->type < ref2->type)
     106             :                 return -1;
     107       12906 :         if (ref1->type > ref2->type)
     108             :                 return 1;
     109       10391 :         if (ref1->no_quota > ref2->no_quota)
     110             :                 return 1;
     111       10288 :         if (ref1->no_quota < ref2->no_quota)
     112             :                 return -1;
     113             :         /* merging of sequenced refs is not allowed */
     114        9740 :         if (compare_seq) {
     115        9458 :                 if (ref1->seq < ref2->seq)
     116             :                         return -1;
     117        9458 :                 if (ref1->seq > ref2->seq)
     118             :                         return 1;
     119             :         }
     120        9416 :         if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
     121             :             ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
     122        5090 :                 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
     123             :                                       btrfs_delayed_node_to_tree_ref(ref1),
     124             :                                       ref1->type);
     125        6871 :         } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
     126             :                    ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
     127        6871 :                 return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
     128             :                                       btrfs_delayed_node_to_data_ref(ref1));
     129             :         }
     130           0 :         BUG();
     131             :         return 0;
     132             : }
     133             : 
     134             : /*
     135             :  * insert a new ref into the rbtree.  This returns any existing refs
     136             :  * for the same (bytenr,parent) tuple, or NULL if the new node was properly
     137             :  * inserted.
     138             :  */
     139      209209 : static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
     140             :                                                   struct rb_node *node)
     141             : {
     142      209209 :         struct rb_node **p = &root->rb_node;
     143             :         struct rb_node *parent_node = NULL;
     144             :         struct btrfs_delayed_ref_node *entry;
     145             :         struct btrfs_delayed_ref_node *ins;
     146             :         int cmp;
     147             : 
     148             :         ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
     149      426091 :         while (*p) {
     150             :                 parent_node = *p;
     151             :                 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
     152             :                                  rb_node);
     153             : 
     154       15669 :                 cmp = comp_entry(entry, ins, 1);
     155       15669 :                 if (cmp < 0)
     156        4146 :                         p = &(*p)->rb_left;
     157       11523 :                 else if (cmp > 0)
     158        3527 :                         p = &(*p)->rb_right;
     159             :                 else
     160             :                         return entry;
     161             :         }
     162             : 
     163             :         rb_link_node(node, parent_node, p);
     164      201213 :         rb_insert_color(node, root);
     165      201213 :         return NULL;
     166             : }
     167             : 
     168             : /* insert a new ref to head ref rbtree */
     169      209593 : static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
     170             :                                                    struct rb_node *node)
     171             : {
     172      209593 :         struct rb_node **p = &root->rb_node;
     173             :         struct rb_node *parent_node = NULL;
     174             :         struct btrfs_delayed_ref_head *entry;
     175             :         struct btrfs_delayed_ref_head *ins;
     176             :         u64 bytenr;
     177             : 
     178             :         ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
     179      209593 :         bytenr = ins->node.bytenr;
     180     2093066 :         while (*p) {
     181             :                 parent_node = *p;
     182     1689445 :                 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
     183             :                                  href_node);
     184             : 
     185     1689445 :                 if (bytenr < entry->node.bytenr)
     186      443539 :                         p = &(*p)->rb_left;
     187     1245906 :                 else if (bytenr > entry->node.bytenr)
     188     1230341 :                         p = &(*p)->rb_right;
     189             :                 else
     190             :                         return entry;
     191             :         }
     192             : 
     193             :         rb_link_node(node, parent_node, p);
     194      194028 :         rb_insert_color(node, root);
     195      194028 :         return NULL;
     196             : }
     197             : 
     198             : /*
     199             :  * find an head entry based on bytenr. This returns the delayed ref
     200             :  * head if it was able to find one, or NULL if nothing was in that spot.
     201             :  * If return_bigger is given, the next bigger entry is returned if no exact
     202             :  * match is found.
     203             :  */
     204             : static struct btrfs_delayed_ref_head *
     205      299244 : find_ref_head(struct rb_root *root, u64 bytenr,
     206             :               int return_bigger)
     207             : {
     208             :         struct rb_node *n;
     209             :         struct btrfs_delayed_ref_head *entry;
     210             : 
     211      299244 :         n = root->rb_node;
     212             :         entry = NULL;
     213     1507529 :         while (n) {
     214     1041382 :                 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
     215             : 
     216     1041382 :                 if (bytenr < entry->node.bytenr)
     217      714798 :                         n = n->rb_left;
     218      326584 :                 else if (bytenr > entry->node.bytenr)
     219      194243 :                         n = n->rb_right;
     220             :                 else
     221             :                         return entry;
     222             :         }
     223      166903 :         if (entry && return_bigger) {
     224       73344 :                 if (bytenr > entry->node.bytenr) {
     225       22932 :                         n = rb_next(&entry->href_node);
     226       22932 :                         if (!n)
     227        2383 :                                 n = rb_first(root);
     228             :                         entry = rb_entry(n, struct btrfs_delayed_ref_head,
     229             :                                          href_node);
     230       22932 :                         return entry;
     231             :                 }
     232             :                 return entry;
     233             :         }
     234             :         return NULL;
     235             : }
     236             : 
     237      191772 : int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
     238             :                            struct btrfs_delayed_ref_head *head)
     239             : {
     240             :         struct btrfs_delayed_ref_root *delayed_refs;
     241             : 
     242      191772 :         delayed_refs = &trans->transaction->delayed_refs;
     243      191772 :         assert_spin_locked(&delayed_refs->lock);
     244      191772 :         if (mutex_trylock(&head->mutex))
     245             :                 return 0;
     246             : 
     247           0 :         atomic_inc(&head->node.refs);
     248             :         spin_unlock(&delayed_refs->lock);
     249             : 
     250           0 :         mutex_lock(&head->mutex);
     251             :         spin_lock(&delayed_refs->lock);
     252           0 :         if (!head->node.in_tree) {
     253           0 :                 mutex_unlock(&head->mutex);
     254           0 :                 btrfs_put_delayed_ref(&head->node);
     255           0 :                 return -EAGAIN;
     256             :         }
     257           0 :         btrfs_put_delayed_ref(&head->node);
     258           0 :         return 0;
     259             : }
     260             : 
     261        6448 : static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
     262             :                                     struct btrfs_delayed_ref_root *delayed_refs,
     263             :                                     struct btrfs_delayed_ref_head *head,
     264             :                                     struct btrfs_delayed_ref_node *ref)
     265             : {
     266        6448 :         if (btrfs_delayed_ref_is_head(ref)) {
     267             :                 head = btrfs_delayed_node_to_head(ref);
     268           0 :                 rb_erase(&head->href_node, &delayed_refs->href_root);
     269             :         } else {
     270        6448 :                 assert_spin_locked(&head->lock);
     271        6448 :                 rb_erase(&ref->rb_node, &head->ref_root);
     272             :         }
     273        6448 :         ref->in_tree = 0;
     274        6448 :         btrfs_put_delayed_ref(ref);
     275        6448 :         atomic_dec(&delayed_refs->num_entries);
     276        6448 :         if (trans->delayed_ref_updates)
     277        3755 :                 trans->delayed_ref_updates--;
     278        6448 : }
     279             : 
     280      106802 : static int merge_ref(struct btrfs_trans_handle *trans,
     281             :                      struct btrfs_delayed_ref_root *delayed_refs,
     282             :                      struct btrfs_delayed_ref_head *head,
     283             :                      struct btrfs_delayed_ref_node *ref, u64 seq)
     284             : {
     285             :         struct rb_node *node;
     286             :         int mod = 0;
     287             :         int done = 0;
     288             : 
     289      106802 :         node = rb_next(&ref->rb_node);
     290      214404 :         while (!done && node) {
     291             :                 struct btrfs_delayed_ref_node *next;
     292             : 
     293             :                 next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
     294         800 :                 node = rb_next(node);
     295         800 :                 if (seq && next->seq >= seq)
     296             :                         break;
     297         800 :                 if (comp_entry(ref, next, 0))
     298         531 :                         continue;
     299             : 
     300         269 :                 if (ref->action == next->action) {
     301           0 :                         mod = next->ref_mod;
     302             :                 } else {
     303         269 :                         if (ref->ref_mod < next->ref_mod) {
     304             :                                 struct btrfs_delayed_ref_node *tmp;
     305             : 
     306             :                                 tmp = ref;
     307             :                                 ref = next;
     308             :                                 next = tmp;
     309             :                                 done = 1;
     310             :                         }
     311         269 :                         mod = -next->ref_mod;
     312             :                 }
     313             : 
     314         269 :                 drop_delayed_ref(trans, delayed_refs, head, next);
     315         269 :                 ref->ref_mod += mod;
     316         269 :                 if (ref->ref_mod == 0) {
     317         269 :                         drop_delayed_ref(trans, delayed_refs, head, ref);
     318             :                         done = 1;
     319             :                 } else {
     320             :                         /*
     321             :                          * You can't have multiples of the same ref on a tree
     322             :                          * block.
     323             :                          */
     324           0 :                         WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
     325             :                                 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
     326             :                 }
     327             :         }
     328      106802 :         return done;
     329             : }
     330             : 
     331      386443 : void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
     332             :                               struct btrfs_fs_info *fs_info,
     333             :                               struct btrfs_delayed_ref_root *delayed_refs,
     334             :                               struct btrfs_delayed_ref_head *head)
     335             : {
     336             :         struct rb_node *node;
     337             :         u64 seq = 0;
     338             : 
     339      386443 :         assert_spin_locked(&head->lock);
     340             :         /*
     341             :          * We don't have too much refs to merge in the case of delayed data
     342             :          * refs.
     343             :          */
     344      386443 :         if (head->is_data)
     345      386449 :                 return;
     346             : 
     347             :         spin_lock(&fs_info->tree_mod_seq_lock);
     348      424090 :         if (!list_empty(&fs_info->tree_mod_seq_list)) {
     349             :                 struct seq_list *elem;
     350             : 
     351             :                 elem = list_first_entry(&fs_info->tree_mod_seq_list,
     352             :                                         struct seq_list, list);
     353        2114 :                 seq = elem->seq;
     354             :         }
     355             :         spin_unlock(&fs_info->tree_mod_seq_lock);
     356             : 
     357      212045 :         node = rb_first(&head->ref_root);
     358      530890 :         while (node) {
     359             :                 struct btrfs_delayed_ref_node *ref;
     360             : 
     361             :                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
     362             :                                rb_node);
     363             :                 /* We can't merge refs that are outside of our seq count */
     364      106804 :                 if (seq && ref->seq >= seq)
     365             :                         break;
     366      106802 :                 if (merge_ref(trans, delayed_refs, head, ref, seq))
     367         269 :                         node = rb_first(&head->ref_root);
     368             :                 else
     369      106533 :                         node = rb_next(&ref->rb_node);
     370             :         }
     371             : }
     372             : 
     373       14648 : int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
     374             :                             struct btrfs_delayed_ref_root *delayed_refs,
     375             :                             u64 seq)
     376             : {
     377             :         struct seq_list *elem;
     378             :         int ret = 0;
     379             : 
     380             :         spin_lock(&fs_info->tree_mod_seq_lock);
     381       29296 :         if (!list_empty(&fs_info->tree_mod_seq_list)) {
     382             :                 elem = list_first_entry(&fs_info->tree_mod_seq_list,
     383             :                                         struct seq_list, list);
     384        3595 :                 if (seq >= elem->seq) {
     385           8 :                         pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
     386             :                                  (u32)(seq >> 32), (u32)seq,
     387             :                                  (u32)(elem->seq >> 32), (u32)elem->seq,
     388             :                                  delayed_refs);
     389             :                         ret = 1;
     390             :                 }
     391             :         }
     392             : 
     393             :         spin_unlock(&fs_info->tree_mod_seq_lock);
     394       14648 :         return ret;
     395             : }
     396             : 
     397             : struct btrfs_delayed_ref_head *
     398      228327 : btrfs_select_ref_head(struct btrfs_trans_handle *trans)
     399             : {
     400             :         struct btrfs_delayed_ref_root *delayed_refs;
     401             :         struct btrfs_delayed_ref_head *head;
     402             :         u64 start;
     403             :         bool loop = false;
     404             : 
     405      228327 :         delayed_refs = &trans->transaction->delayed_refs;
     406             : 
     407             : again:
     408      228538 :         start = delayed_refs->run_delayed_start;
     409      228538 :         head = find_ref_head(&delayed_refs->href_root, start, 1);
     410      228538 :         if (!head && !loop) {
     411       36346 :                 delayed_refs->run_delayed_start = 0;
     412             :                 start = 0;
     413             :                 loop = true;
     414       36346 :                 head = find_ref_head(&delayed_refs->href_root, start, 1);
     415       36346 :                 if (!head)
     416             :                         return NULL;
     417      192192 :         } else if (!head && loop) {
     418             :                 return NULL;
     419             :         }
     420             : 
     421      192446 :         while (head->processing) {
     422             :                 struct rb_node *node;
     423             : 
     424         674 :                 node = rb_next(&head->href_node);
     425         674 :                 if (!node) {
     426         420 :                         if (loop)
     427             :                                 return NULL;
     428         211 :                         delayed_refs->run_delayed_start = 0;
     429             :                         start = 0;
     430             :                         loop = true;
     431         211 :                         goto again;
     432             :                 }
     433         254 :                 head = rb_entry(node, struct btrfs_delayed_ref_head,
     434             :                                 href_node);
     435             :         }
     436             : 
     437      191772 :         head->processing = 1;
     438      191772 :         WARN_ON(delayed_refs->num_heads_ready == 0);
     439      191772 :         delayed_refs->num_heads_ready--;
     440      383544 :         delayed_refs->run_delayed_start = head->node.bytenr +
     441      191772 :                 head->node.num_bytes;
     442      191772 :         return head;
     443             : }
     444             : 
     445             : /*
     446             :  * helper function to update an extent delayed ref in the
     447             :  * rbtree.  existing and update must both have the same
     448             :  * bytenr and parent
     449             :  *
     450             :  * This may free existing if the update cancels out whatever
     451             :  * operation it was doing.
     452             :  */
     453             : static noinline void
     454        7996 : update_existing_ref(struct btrfs_trans_handle *trans,
     455             :                     struct btrfs_delayed_ref_root *delayed_refs,
     456             :                     struct btrfs_delayed_ref_head *head,
     457             :                     struct btrfs_delayed_ref_node *existing,
     458             :                     struct btrfs_delayed_ref_node *update)
     459             : {
     460        7996 :         if (update->action != existing->action) {
     461             :                 /*
     462             :                  * this is effectively undoing either an add or a
     463             :                  * drop.  We decrement the ref_mod, and if it goes
     464             :                  * down to zero we just delete the entry without
     465             :                  * every changing the extent allocation tree.
     466             :                  */
     467        5937 :                 existing->ref_mod--;
     468        5937 :                 if (existing->ref_mod == 0)
     469        5910 :                         drop_delayed_ref(trans, delayed_refs, head, existing);
     470             :                 else
     471          27 :                         WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
     472             :                                 existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
     473             :         } else {
     474        2059 :                 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
     475             :                         existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
     476             :                 /*
     477             :                  * the action on the existing ref matches
     478             :                  * the action on the ref we're trying to add.
     479             :                  * Bump the ref_mod by one so the backref that
     480             :                  * is eventually added/removed has the correct
     481             :                  * reference count
     482             :                  */
     483        2059 :                 existing->ref_mod += update->ref_mod;
     484             :         }
     485        7996 : }
     486             : 
     487             : /*
     488             :  * helper function to update the accounting in the head ref
     489             :  * existing and update must have the same bytenr
     490             :  */
     491             : static noinline void
     492       15591 : update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
     493             :                          struct btrfs_delayed_ref_node *update)
     494             : {
     495             :         struct btrfs_delayed_ref_head *existing_ref;
     496             :         struct btrfs_delayed_ref_head *ref;
     497             : 
     498             :         existing_ref = btrfs_delayed_node_to_head(existing);
     499             :         ref = btrfs_delayed_node_to_head(update);
     500       15591 :         BUG_ON(existing_ref->is_data != ref->is_data);
     501             : 
     502             :         spin_lock(&existing_ref->lock);
     503       15591 :         if (ref->must_insert_reserved) {
     504             :                 /* if the extent was freed and then
     505             :                  * reallocated before the delayed ref
     506             :                  * entries were processed, we can end up
     507             :                  * with an existing head ref without
     508             :                  * the must_insert_reserved flag set.
     509             :                  * Set it again here
     510             :                  */
     511           0 :                 existing_ref->must_insert_reserved = ref->must_insert_reserved;
     512             : 
     513             :                 /*
     514             :                  * update the num_bytes so we make sure the accounting
     515             :                  * is done correctly
     516             :                  */
     517           0 :                 existing->num_bytes = update->num_bytes;
     518             : 
     519             :         }
     520             : 
     521       15591 :         if (ref->extent_op) {
     522         113 :                 if (!existing_ref->extent_op) {
     523         113 :                         existing_ref->extent_op = ref->extent_op;
     524             :                 } else {
     525           0 :                         if (ref->extent_op->update_key) {
     526           0 :                                 memcpy(&existing_ref->extent_op->key,
     527           0 :                                        &ref->extent_op->key,
     528             :                                        sizeof(ref->extent_op->key));
     529           0 :                                 existing_ref->extent_op->update_key = 1;
     530             :                         }
     531           0 :                         if (ref->extent_op->update_flags) {
     532           0 :                                 existing_ref->extent_op->flags_to_set |=
     533           0 :                                         ref->extent_op->flags_to_set;
     534           0 :                                 existing_ref->extent_op->update_flags = 1;
     535             :                         }
     536           0 :                         btrfs_free_delayed_extent_op(ref->extent_op);
     537             :                 }
     538             :         }
     539             :         /*
     540             :          * update the reference mod on the head to reflect this new operation,
     541             :          * only need the lock for this case cause we could be processing it
     542             :          * currently, for refs we just added we know we're a-ok.
     543             :          */
     544       15591 :         existing->ref_mod += update->ref_mod;
     545             :         spin_unlock(&existing_ref->lock);
     546       15591 : }
     547             : 
     548             : /*
     549             :  * helper function to actually insert a head node into the rbtree.
     550             :  * this does all the dirty work in terms of maintaining the correct
     551             :  * overall modification count.
     552             :  */
     553             : static noinline struct btrfs_delayed_ref_head *
     554      209618 : add_delayed_ref_head(struct btrfs_fs_info *fs_info,
     555             :                      struct btrfs_trans_handle *trans,
     556             :                      struct btrfs_delayed_ref_node *ref, u64 bytenr,
     557             :                      u64 num_bytes, int action, int is_data)
     558             : {
     559             :         struct btrfs_delayed_ref_head *existing;
     560             :         struct btrfs_delayed_ref_head *head_ref = NULL;
     561             :         struct btrfs_delayed_ref_root *delayed_refs;
     562             :         int count_mod = 1;
     563             :         int must_insert_reserved = 0;
     564             : 
     565             :         /*
     566             :          * the head node stores the sum of all the mods, so dropping a ref
     567             :          * should drop the sum in the head node by one.
     568             :          */
     569      209618 :         if (action == BTRFS_UPDATE_DELAYED_HEAD)
     570             :                 count_mod = 0;
     571      209206 :         else if (action == BTRFS_DROP_DELAYED_REF)
     572             :                 count_mod = -1;
     573             : 
     574             :         /*
     575             :          * BTRFS_ADD_DELAYED_EXTENT means that we need to update
     576             :          * the reserved accounting when the extent is finally added, or
     577             :          * if a later modification deletes the delayed ref without ever
     578             :          * inserting the extent into the extent allocation tree.
     579             :          * ref->must_insert_reserved is the flag used to record
     580             :          * that accounting mods are required.
     581             :          *
     582             :          * Once we record must_insert_reserved, switch the action to
     583             :          * BTRFS_ADD_DELAYED_REF because other special casing is not required.
     584             :          */
     585      209618 :         if (action == BTRFS_ADD_DELAYED_EXTENT)
     586             :                 must_insert_reserved = 1;
     587             :         else
     588             :                 must_insert_reserved = 0;
     589             : 
     590      209618 :         delayed_refs = &trans->transaction->delayed_refs;
     591             : 
     592             :         /* first set the basic ref node struct up */
     593             :         atomic_set(&ref->refs, 1);
     594      209618 :         ref->bytenr = bytenr;
     595      209618 :         ref->num_bytes = num_bytes;
     596      209618 :         ref->ref_mod = count_mod;
     597      209618 :         ref->type  = 0;
     598      209618 :         ref->action  = 0;
     599      209618 :         ref->is_head = 1;
     600      209618 :         ref->in_tree = 1;
     601      209618 :         ref->seq = 0;
     602             : 
     603             :         head_ref = btrfs_delayed_node_to_head(ref);
     604      209618 :         head_ref->must_insert_reserved = must_insert_reserved;
     605      209618 :         head_ref->is_data = is_data;
     606      209618 :         head_ref->ref_root = RB_ROOT;
     607      209618 :         head_ref->processing = 0;
     608             : 
     609      209618 :         spin_lock_init(&head_ref->lock);
     610      209618 :         mutex_init(&head_ref->mutex);
     611             : 
     612      209619 :         trace_add_delayed_ref_head(ref, head_ref, action);
     613             : 
     614      209619 :         existing = htree_insert(&delayed_refs->href_root,
     615             :                                 &head_ref->href_node);
     616      209619 :         if (existing) {
     617       15591 :                 update_existing_head_ref(&existing->node, ref);
     618             :                 /*
     619             :                  * we've updated the existing ref, free the newly
     620             :                  * allocated ref
     621             :                  */
     622       15591 :                 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
     623             :                 head_ref = existing;
     624             :         } else {
     625      194028 :                 delayed_refs->num_heads++;
     626      194028 :                 delayed_refs->num_heads_ready++;
     627      194028 :                 atomic_inc(&delayed_refs->num_entries);
     628      194030 :                 trans->delayed_ref_updates++;
     629             :         }
     630      209621 :         return head_ref;
     631             : }
     632             : 
     633             : /*
     634             :  * helper to insert a delayed tree ref into the rbtree.
     635             :  */
     636             : static noinline void
     637      111044 : add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
     638             :                      struct btrfs_trans_handle *trans,
     639             :                      struct btrfs_delayed_ref_head *head_ref,
     640             :                      struct btrfs_delayed_ref_node *ref, u64 bytenr,
     641             :                      u64 num_bytes, u64 parent, u64 ref_root, int level,
     642             :                      int action, int no_quota)
     643             : {
     644             :         struct btrfs_delayed_ref_node *existing;
     645             :         struct btrfs_delayed_tree_ref *full_ref;
     646             :         struct btrfs_delayed_ref_root *delayed_refs;
     647             :         u64 seq = 0;
     648             : 
     649      111044 :         if (action == BTRFS_ADD_DELAYED_EXTENT)
     650             :                 action = BTRFS_ADD_DELAYED_REF;
     651             : 
     652      111044 :         if (is_fstree(ref_root))
     653       54947 :                 seq = atomic64_read(&fs_info->tree_mod_seq);
     654      111044 :         delayed_refs = &trans->transaction->delayed_refs;
     655             : 
     656             :         /* first set the basic ref node struct up */
     657             :         atomic_set(&ref->refs, 1);
     658      111044 :         ref->bytenr = bytenr;
     659      111044 :         ref->num_bytes = num_bytes;
     660      111044 :         ref->ref_mod = 1;
     661      111044 :         ref->action = action;
     662      111044 :         ref->is_head = 0;
     663      111044 :         ref->in_tree = 1;
     664      111044 :         ref->no_quota = no_quota;
     665      111044 :         ref->seq = seq;
     666             : 
     667             :         full_ref = btrfs_delayed_node_to_tree_ref(ref);
     668      111044 :         full_ref->parent = parent;
     669      111044 :         full_ref->root = ref_root;
     670      111044 :         if (parent)
     671        1390 :                 ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
     672             :         else
     673      109654 :                 ref->type = BTRFS_TREE_BLOCK_REF_KEY;
     674      111044 :         full_ref->level = level;
     675             : 
     676      111044 :         trace_add_delayed_tree_ref(ref, full_ref, action);
     677             : 
     678             :         spin_lock(&head_ref->lock);
     679      111044 :         existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
     680      111043 :         if (existing) {
     681        2216 :                 update_existing_ref(trans, delayed_refs, head_ref, existing,
     682             :                                     ref);
     683             :                 /*
     684             :                  * we've updated the existing ref, free the newly
     685             :                  * allocated ref
     686             :                  */
     687        2216 :                 kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
     688             :         } else {
     689      108827 :                 atomic_inc(&delayed_refs->num_entries);
     690      108827 :                 trans->delayed_ref_updates++;
     691             :         }
     692             :         spin_unlock(&head_ref->lock);
     693      111043 : }
     694             : 
     695             : /*
     696             :  * helper to insert a delayed data ref into the rbtree.
     697             :  */
     698             : static noinline void
     699       98165 : add_delayed_data_ref(struct btrfs_fs_info *fs_info,
     700             :                      struct btrfs_trans_handle *trans,
     701             :                      struct btrfs_delayed_ref_head *head_ref,
     702             :                      struct btrfs_delayed_ref_node *ref, u64 bytenr,
     703             :                      u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
     704             :                      u64 offset, int action, int no_quota)
     705             : {
     706             :         struct btrfs_delayed_ref_node *existing;
     707             :         struct btrfs_delayed_data_ref *full_ref;
     708             :         struct btrfs_delayed_ref_root *delayed_refs;
     709             :         u64 seq = 0;
     710             : 
     711       98165 :         if (action == BTRFS_ADD_DELAYED_EXTENT)
     712             :                 action = BTRFS_ADD_DELAYED_REF;
     713             : 
     714       98165 :         delayed_refs = &trans->transaction->delayed_refs;
     715             : 
     716       98165 :         if (is_fstree(ref_root))
     717       88306 :                 seq = atomic64_read(&fs_info->tree_mod_seq);
     718             : 
     719             :         /* first set the basic ref node struct up */
     720             :         atomic_set(&ref->refs, 1);
     721       98165 :         ref->bytenr = bytenr;
     722       98165 :         ref->num_bytes = num_bytes;
     723       98165 :         ref->ref_mod = 1;
     724       98165 :         ref->action = action;
     725       98165 :         ref->is_head = 0;
     726       98165 :         ref->in_tree = 1;
     727       98165 :         ref->no_quota = no_quota;
     728       98165 :         ref->seq = seq;
     729             : 
     730             :         full_ref = btrfs_delayed_node_to_data_ref(ref);
     731       98165 :         full_ref->parent = parent;
     732       98165 :         full_ref->root = ref_root;
     733       98165 :         if (parent)
     734       18590 :                 ref->type = BTRFS_SHARED_DATA_REF_KEY;
     735             :         else
     736       79575 :                 ref->type = BTRFS_EXTENT_DATA_REF_KEY;
     737             : 
     738       98165 :         full_ref->objectid = owner;
     739       98165 :         full_ref->offset = offset;
     740             : 
     741       98165 :         trace_add_delayed_data_ref(ref, full_ref, action);
     742             : 
     743             :         spin_lock(&head_ref->lock);
     744       98165 :         existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
     745       98164 :         if (existing) {
     746        5780 :                 update_existing_ref(trans, delayed_refs, head_ref, existing,
     747             :                                     ref);
     748             :                 /*
     749             :                  * we've updated the existing ref, free the newly
     750             :                  * allocated ref
     751             :                  */
     752        5780 :                 kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
     753             :         } else {
     754       92384 :                 atomic_inc(&delayed_refs->num_entries);
     755       92384 :                 trans->delayed_ref_updates++;
     756             :         }
     757             :         spin_unlock(&head_ref->lock);
     758       98164 : }
     759             : 
     760             : /*
     761             :  * add a delayed tree ref.  This does all of the accounting required
     762             :  * to make sure the delayed ref is eventually processed before this
     763             :  * transaction commits.
     764             :  */
     765      111033 : int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
     766      111042 :                                struct btrfs_trans_handle *trans,
     767             :                                u64 bytenr, u64 num_bytes, u64 parent,
     768             :                                u64 ref_root,  int level, int action,
     769             :                                struct btrfs_delayed_extent_op *extent_op,
     770             :                                int no_quota)
     771             : {
     772             :         struct btrfs_delayed_tree_ref *ref;
     773             :         struct btrfs_delayed_ref_head *head_ref;
     774             :         struct btrfs_delayed_ref_root *delayed_refs;
     775             : 
     776      111033 :         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
     777             :                 no_quota = 0;
     778             : 
     779      111033 :         BUG_ON(extent_op && extent_op->is_data);
     780      111033 :         ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
     781      111033 :         if (!ref)
     782             :                 return -ENOMEM;
     783             : 
     784      111033 :         head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
     785      111039 :         if (!head_ref) {
     786           0 :                 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
     787           0 :                 return -ENOMEM;
     788             :         }
     789             : 
     790      111039 :         head_ref->extent_op = extent_op;
     791             : 
     792      111039 :         delayed_refs = &trans->transaction->delayed_refs;
     793             :         spin_lock(&delayed_refs->lock);
     794             : 
     795             :         /*
     796             :          * insert both the head node and the new ref without dropping
     797             :          * the spin lock
     798             :          */
     799      222084 :         head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
     800             :                                         bytenr, num_bytes, action, 0);
     801             : 
     802      111044 :         add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
     803             :                                    num_bytes, parent, ref_root, level, action,
     804             :                                    no_quota);
     805             :         spin_unlock(&delayed_refs->lock);
     806             : 
     807      111043 :         return 0;
     808             : }
     809             : 
     810             : /*
     811             :  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
     812             :  */
     813       98161 : int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
     814       98164 :                                struct btrfs_trans_handle *trans,
     815             :                                u64 bytenr, u64 num_bytes,
     816             :                                u64 parent, u64 ref_root,
     817             :                                u64 owner, u64 offset, int action,
     818             :                                struct btrfs_delayed_extent_op *extent_op,
     819             :                                int no_quota)
     820             : {
     821             :         struct btrfs_delayed_data_ref *ref;
     822             :         struct btrfs_delayed_ref_head *head_ref;
     823             :         struct btrfs_delayed_ref_root *delayed_refs;
     824             : 
     825       98161 :         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
     826             :                 no_quota = 0;
     827             : 
     828       98161 :         BUG_ON(extent_op && !extent_op->is_data);
     829       98161 :         ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
     830       98165 :         if (!ref)
     831             :                 return -ENOMEM;
     832             : 
     833       98165 :         head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
     834       98163 :         if (!head_ref) {
     835           0 :                 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
     836           0 :                 return -ENOMEM;
     837             :         }
     838             : 
     839       98163 :         head_ref->extent_op = extent_op;
     840             : 
     841       98163 :         delayed_refs = &trans->transaction->delayed_refs;
     842             :         spin_lock(&delayed_refs->lock);
     843             : 
     844             :         /*
     845             :          * insert both the head node and the new ref without dropping
     846             :          * the spin lock
     847             :          */
     848      196328 :         head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
     849             :                                         bytenr, num_bytes, action, 1);
     850             : 
     851       98165 :         add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
     852             :                                    num_bytes, parent, ref_root, owner, offset,
     853             :                                    action, no_quota);
     854             :         spin_unlock(&delayed_refs->lock);
     855             : 
     856       98164 :         return 0;
     857             : }
     858             : 
     859         412 : int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
     860         412 :                                 struct btrfs_trans_handle *trans,
     861             :                                 u64 bytenr, u64 num_bytes,
     862             :                                 struct btrfs_delayed_extent_op *extent_op)
     863             : {
     864             :         struct btrfs_delayed_ref_head *head_ref;
     865             :         struct btrfs_delayed_ref_root *delayed_refs;
     866             : 
     867         412 :         head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
     868         412 :         if (!head_ref)
     869             :                 return -ENOMEM;
     870             : 
     871         412 :         head_ref->extent_op = extent_op;
     872             : 
     873         412 :         delayed_refs = &trans->transaction->delayed_refs;
     874             :         spin_lock(&delayed_refs->lock);
     875             : 
     876         824 :         add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
     877             :                                    num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
     878         412 :                                    extent_op->is_data);
     879             : 
     880             :         spin_unlock(&delayed_refs->lock);
     881         412 :         return 0;
     882             : }
     883             : 
     884             : /*
     885             :  * this does a simple search for the head node for a given extent.
     886             :  * It must be called with the delayed ref spinlock held, and it returns
     887             :  * the head node if any where found, or NULL if not.
     888             :  */
     889             : struct btrfs_delayed_ref_head *
     890       34358 : btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
     891             : {
     892             :         struct btrfs_delayed_ref_root *delayed_refs;
     893             : 
     894       34358 :         delayed_refs = &trans->transaction->delayed_refs;
     895       34358 :         return find_ref_head(&delayed_refs->href_root, bytenr, 0);
     896             : }
     897             : 
     898           0 : void btrfs_delayed_ref_exit(void)
     899             : {
     900           0 :         if (btrfs_delayed_ref_head_cachep)
     901           0 :                 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
     902           0 :         if (btrfs_delayed_tree_ref_cachep)
     903           0 :                 kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
     904           0 :         if (btrfs_delayed_data_ref_cachep)
     905           0 :                 kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
     906           0 :         if (btrfs_delayed_extent_op_cachep)
     907           0 :                 kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
     908           0 : }
     909             : 
     910           0 : int btrfs_delayed_ref_init(void)
     911             : {
     912           0 :         btrfs_delayed_ref_head_cachep = kmem_cache_create(
     913             :                                 "btrfs_delayed_ref_head",
     914             :                                 sizeof(struct btrfs_delayed_ref_head), 0,
     915             :                                 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
     916           0 :         if (!btrfs_delayed_ref_head_cachep)
     917             :                 goto fail;
     918             : 
     919           0 :         btrfs_delayed_tree_ref_cachep = kmem_cache_create(
     920             :                                 "btrfs_delayed_tree_ref",
     921             :                                 sizeof(struct btrfs_delayed_tree_ref), 0,
     922             :                                 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
     923           0 :         if (!btrfs_delayed_tree_ref_cachep)
     924             :                 goto fail;
     925             : 
     926           0 :         btrfs_delayed_data_ref_cachep = kmem_cache_create(
     927             :                                 "btrfs_delayed_data_ref",
     928             :                                 sizeof(struct btrfs_delayed_data_ref), 0,
     929             :                                 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
     930           0 :         if (!btrfs_delayed_data_ref_cachep)
     931             :                 goto fail;
     932             : 
     933           0 :         btrfs_delayed_extent_op_cachep = kmem_cache_create(
     934             :                                 "btrfs_delayed_extent_op",
     935             :                                 sizeof(struct btrfs_delayed_extent_op), 0,
     936             :                                 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
     937           0 :         if (!btrfs_delayed_extent_op_cachep)
     938             :                 goto fail;
     939             : 
     940             :         return 0;
     941             : fail:
     942           0 :         btrfs_delayed_ref_exit();
     943           0 :         return -ENOMEM;
     944             : }

Generated by: LCOV version 1.10