/* fs/btrfs/extent-tree.c (as of "Btrfs: record first logical byte in memory") */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
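
/*
 * Illustrative only (hypothetical caller, not part of this file): a path
 * that must have a new chunk no matter what would pass CHUNK_ALLOC_FORCE,
 * e.g.
 *
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 *
 * while ENOSPC-sensitive callers would normally start with
 * CHUNK_ALLOC_NO_FORCE or CHUNK_ALLOC_LIMITED and only escalate when an
 * allocation actually fails.
 */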

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
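
/*
 * A sketch of the intended pairing (not a real call site in this file):
 * space reserved at allocation time with
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *
 * is later dropped with RESERVE_FREE once the extent is committed or
 * abandoned; RESERVE_ALLOC_NO_ACCOUNT behaves like RESERVE_ALLOC but
 * skips the bytes_may_use update because the caller has already done the
 * ENOSPC accounting.
 */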

static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret); /* -ENOMEM */
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret); /* -ENOMEM */

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret); /* -ENOMEM */
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
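
/*
 * Worked example for add_new_free_space() (hypothetical numbers): caching
 * the range [0, 100) while pinned_extents holds the inclusive range
 * [30, 49]:
 *
 *	total_added = add_new_free_space(block_group, info, 0, 100);
 *
 * adds [0, 30) as free space, advances start past the pinned run to
 * extent_end + 1 = 50, and then adds the remaining [50, 100) tail, so
 * total_added comes back as 80.
 */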

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info.  The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load
	 * here, so we can wait for it to finish; otherwise we could end up
	 * allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
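
/*
 * Usage sketch (hypothetical call sites): a caller that only wants the
 * on-disk free space cache loaded, without kicking off the slow extent
 * tree scan, passes load_cache_only:
 *
 *	ret = cache_block_group(cache, 1);
 *
 * whereas cache_block_group(cache, 0) falls back to queueing
 * caching_thread() on the caching_workers queue whenever the fast load is
 * unavailable or fails.
 */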

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
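
/*
 * The return convention above is inherited from btrfs_search_slot(): 0
 * means an extent item with exactly this (start, len) key exists, a
 * positive value means it does not, and a negative value is an error
 * (e.g. -ENOMEM).  A sketch of a caller:
 *
 *	if (btrfs_lookup_extent(root, bytenr, num_bytes) == 0) {
 *		... the extent item is present in the extent tree ...
 *	}
 */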

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
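
/*
 * A sketch of a caller (illustrative, not from this file): reading the
 * refcount and flags as they would look once all queued delayed refs are
 * applied:
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *				       &refs, &flags);
 *	if (!ret && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
 *		... the block carries full back refs ...
 *
 * Passing a NULL trans searches the commit root instead, so only the
 * on-disk state, without delayed ref adjustments, is reported.
 */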

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all the cases implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs, and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of the key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
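
/*
 * Example key composition (hypothetical values): an implicit data back
 * ref for an extent at byte 12582912, held by inode 257 at file offset 0
 * in subvolume 5, is keyed as
 *
 *	(12582912, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) data back ref from a parent leaf at byte 30408704
 * would be keyed as (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704).
 */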
936
937 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
938 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
939                                   struct btrfs_root *root,
940                                   struct btrfs_path *path,
941                                   u64 owner, u32 extra_size)
942 {
943         struct btrfs_extent_item *item;
944         struct btrfs_extent_item_v0 *ei0;
945         struct btrfs_extent_ref_v0 *ref0;
946         struct btrfs_tree_block_info *bi;
947         struct extent_buffer *leaf;
948         struct btrfs_key key;
949         struct btrfs_key found_key;
950         u32 new_size = sizeof(*item);
951         u64 refs;
952         int ret;
953
954         leaf = path->nodes[0];
955         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
956
957         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
958         ei0 = btrfs_item_ptr(leaf, path->slots[0],
959                              struct btrfs_extent_item_v0);
960         refs = btrfs_extent_refs_v0(leaf, ei0);
961
962         if (owner == (u64)-1) {
963                 while (1) {
964                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
965                                 ret = btrfs_next_leaf(root, path);
966                                 if (ret < 0)
967                                         return ret;
968                                 BUG_ON(ret > 0); /* Corruption */
969                                 leaf = path->nodes[0];
970                         }
971                         btrfs_item_key_to_cpu(leaf, &found_key,
972                                               path->slots[0]);
973                         BUG_ON(key.objectid != found_key.objectid);
974                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
975                                 path->slots[0]++;
976                                 continue;
977                         }
978                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
979                                               struct btrfs_extent_ref_v0);
980                         owner = btrfs_ref_objectid_v0(leaf, ref0);
981                         break;
982                 }
983         }
984         btrfs_release_path(path);
985
986         if (owner < BTRFS_FIRST_FREE_OBJECTID)
987                 new_size += sizeof(*bi);
988
989         new_size -= sizeof(*ei0);
990         ret = btrfs_search_slot(trans, root, &key, path,
991                                 new_size + extra_size, 1);
992         if (ret < 0)
993                 return ret;
994         BUG_ON(ret); /* Corruption */
995
996         btrfs_extend_item(trans, root, path, new_size);
997
998         leaf = path->nodes[0];
999         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1000         btrfs_set_extent_refs(leaf, item, refs);
1001         /* FIXME: get real generation */
1002         btrfs_set_extent_generation(leaf, item, 0);
1003         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1004                 btrfs_set_extent_flags(leaf, item,
1005                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1006                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1007                 bi = (struct btrfs_tree_block_info *)(item + 1);
1008                 /* FIXME: get first key of the block */
1009                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1010                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1011         } else {
1012                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1013         }
1014         btrfs_mark_buffer_dirty(leaf);
1015         return 0;
1016 }
1017 #endif
1018
1019 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1020 {
1021         u32 high_crc = ~(u32)0;
1022         u32 low_crc = ~(u32)0;
1023         __le64 lenum;
1024
1025         lenum = cpu_to_le64(root_objectid);
1026         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1027         lenum = cpu_to_le64(owner);
1028         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1029         lenum = cpu_to_le64(offset);
1030         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1031
1032         return ((u64)high_crc << 31) ^ (u64)low_crc;
1033 }
1034
1035 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1036                                      struct btrfs_extent_data_ref *ref)
1037 {
1038         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1039                                     btrfs_extent_data_ref_objectid(leaf, ref),
1040                                     btrfs_extent_data_ref_offset(leaf, ref));
1041 }
1042
1043 static int match_extent_data_ref(struct extent_buffer *leaf,
1044                                  struct btrfs_extent_data_ref *ref,
1045                                  u64 root_objectid, u64 owner, u64 offset)
1046 {
1047         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1048             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1049             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1050                 return 0;
1051         return 1;
1052 }
1053
1054 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1055                                            struct btrfs_root *root,
1056                                            struct btrfs_path *path,
1057                                            u64 bytenr, u64 parent,
1058                                            u64 root_objectid,
1059                                            u64 owner, u64 offset)
1060 {
1061         struct btrfs_key key;
1062         struct btrfs_extent_data_ref *ref;
1063         struct extent_buffer *leaf;
1064         u32 nritems;
1065         int ret;
1066         int recow;
1067         int err = -ENOENT;
1068
1069         key.objectid = bytenr;
1070         if (parent) {
1071                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1072                 key.offset = parent;
1073         } else {
1074                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1075                 key.offset = hash_extent_data_ref(root_objectid,
1076                                                   owner, offset);
1077         }
1078 again:
1079         recow = 0;
1080         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1081         if (ret < 0) {
1082                 err = ret;
1083                 goto fail;
1084         }
1085
1086         if (parent) {
1087                 if (!ret)
1088                         return 0;
1089 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1090                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1091                 btrfs_release_path(path);
1092                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1093                 if (ret < 0) {
1094                         err = ret;
1095                         goto fail;
1096                 }
1097                 if (!ret)
1098                         return 0;
1099 #endif
1100                 goto fail;
1101         }
1102
1103         leaf = path->nodes[0];
1104         nritems = btrfs_header_nritems(leaf);
1105         while (1) {
1106                 if (path->slots[0] >= nritems) {
1107                         ret = btrfs_next_leaf(root, path);
1108                         if (ret < 0)
1109                                 err = ret;
1110                         if (ret)
1111                                 goto fail;
1112
1113                         leaf = path->nodes[0];
1114                         nritems = btrfs_header_nritems(leaf);
1115                         recow = 1;
1116                 }
1117
1118                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1119                 if (key.objectid != bytenr ||
1120                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1121                         goto fail;
1122
1123                 ref = btrfs_item_ptr(leaf, path->slots[0],
1124                                      struct btrfs_extent_data_ref);
1125
1126                 if (match_extent_data_ref(leaf, ref, root_objectid,
1127                                           owner, offset)) {
1128                         if (recow) {
1129                                 btrfs_release_path(path);
1130                                 goto again;
1131                         }
1132                         err = 0;
1133                         break;
1134                 }
1135                 path->slots[0]++;
1136         }
1137 fail:
1138         return err;
1139 }
1140
1141 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1142                                            struct btrfs_root *root,
1143                                            struct btrfs_path *path,
1144                                            u64 bytenr, u64 parent,
1145                                            u64 root_objectid, u64 owner,
1146                                            u64 offset, int refs_to_add)
1147 {
1148         struct btrfs_key key;
1149         struct extent_buffer *leaf;
1150         u32 size;
1151         u32 num_refs;
1152         int ret;
1153
1154         key.objectid = bytenr;
1155         if (parent) {
1156                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1157                 key.offset = parent;
1158                 size = sizeof(struct btrfs_shared_data_ref);
1159         } else {
1160                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1161                 key.offset = hash_extent_data_ref(root_objectid,
1162                                                   owner, offset);
1163                 size = sizeof(struct btrfs_extent_data_ref);
1164         }
1165
1166         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1167         if (ret && ret != -EEXIST)
1168                 goto fail;
1169
1170         leaf = path->nodes[0];
1171         if (parent) {
1172                 struct btrfs_shared_data_ref *ref;
1173                 ref = btrfs_item_ptr(leaf, path->slots[0],
1174                                      struct btrfs_shared_data_ref);
1175                 if (ret == 0) {
1176                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1177                 } else {
1178                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1179                         num_refs += refs_to_add;
1180                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1181                 }
1182         } else {
1183                 struct btrfs_extent_data_ref *ref;
1184                 while (ret == -EEXIST) {
1185                         ref = btrfs_item_ptr(leaf, path->slots[0],
1186                                              struct btrfs_extent_data_ref);
1187                         if (match_extent_data_ref(leaf, ref, root_objectid,
1188                                                   owner, offset))
1189                                 break;
1190                         btrfs_release_path(path);
1191                         key.offset++;
1192                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1193                                                       size);
1194                         if (ret && ret != -EEXIST)
1195                                 goto fail;
1196
1197                         leaf = path->nodes[0];
1198                 }
1199                 ref = btrfs_item_ptr(leaf, path->slots[0],
1200                                      struct btrfs_extent_data_ref);
1201                 if (ret == 0) {
1202                         btrfs_set_extent_data_ref_root(leaf, ref,
1203                                                        root_objectid);
1204                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1205                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1206                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1207                 } else {
1208                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1209                         num_refs += refs_to_add;
1210                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1211                 }
1212         }
1213         btrfs_mark_buffer_dirty(leaf);
1214         ret = 0;
1215 fail:
1216         btrfs_release_path(path);
1217         return ret;
1218 }
1219
1220 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1221                                            struct btrfs_root *root,
1222                                            struct btrfs_path *path,
1223                                            int refs_to_drop)
1224 {
1225         struct btrfs_key key;
1226         struct btrfs_extent_data_ref *ref1 = NULL;
1227         struct btrfs_shared_data_ref *ref2 = NULL;
1228         struct extent_buffer *leaf;
1229         u32 num_refs = 0;
1230         int ret = 0;
1231
1232         leaf = path->nodes[0];
1233         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1234
1235         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1236                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1237                                       struct btrfs_extent_data_ref);
1238                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1239         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1240                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1241                                       struct btrfs_shared_data_ref);
1242                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1243 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1244         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1245                 struct btrfs_extent_ref_v0 *ref0;
1246                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1247                                       struct btrfs_extent_ref_v0);
1248                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1249 #endif
1250         } else {
1251                 BUG();
1252         }
1253
1254         BUG_ON(num_refs < refs_to_drop);
1255         num_refs -= refs_to_drop;
1256
1257         if (num_refs == 0) {
1258                 ret = btrfs_del_item(trans, root, path);
1259         } else {
1260                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1261                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1262                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1263                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1264 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1265                 else {
1266                         struct btrfs_extent_ref_v0 *ref0;
1267                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1268                                         struct btrfs_extent_ref_v0);
1269                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1270                 }
1271 #endif
1272                 btrfs_mark_buffer_dirty(leaf);
1273         }
1274         return ret;
1275 }
1276
1277 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1278                                           struct btrfs_path *path,
1279                                           struct btrfs_extent_inline_ref *iref)
1280 {
1281         struct btrfs_key key;
1282         struct extent_buffer *leaf;
1283         struct btrfs_extent_data_ref *ref1;
1284         struct btrfs_shared_data_ref *ref2;
1285         u32 num_refs = 0;
1286
1287         leaf = path->nodes[0];
1288         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1289         if (iref) {
1290                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1291                     BTRFS_EXTENT_DATA_REF_KEY) {
1292                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1293                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1294                 } else {
1295                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1296                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1297                 }
1298         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1299                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1300                                       struct btrfs_extent_data_ref);
1301                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1302         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1303                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1304                                       struct btrfs_shared_data_ref);
1305                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1308                 struct btrfs_extent_ref_v0 *ref0;
1309                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310                                       struct btrfs_extent_ref_v0);
1311                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1312 #endif
1313         } else {
1314                 WARN_ON(1);
1315         }
1316         return num_refs;
1317 }
1318
1319 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1320                                           struct btrfs_root *root,
1321                                           struct btrfs_path *path,
1322                                           u64 bytenr, u64 parent,
1323                                           u64 root_objectid)
1324 {
1325         struct btrfs_key key;
1326         int ret;
1327
1328         key.objectid = bytenr;
1329         if (parent) {
1330                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1331                 key.offset = parent;
1332         } else {
1333                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1334                 key.offset = root_objectid;
1335         }
1336
1337         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1338         if (ret > 0)
1339                 ret = -ENOENT;
1340 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1341         if (ret == -ENOENT && parent) {
1342                 btrfs_release_path(path);
1343                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1344                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1345                 if (ret > 0)
1346                         ret = -ENOENT;
1347         }
1348 #endif
1349         return ret;
1350 }
1351
1352 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1353                                           struct btrfs_root *root,
1354                                           struct btrfs_path *path,
1355                                           u64 bytenr, u64 parent,
1356                                           u64 root_objectid)
1357 {
1358         struct btrfs_key key;
1359         int ret;
1360
1361         key.objectid = bytenr;
1362         if (parent) {
1363                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1364                 key.offset = parent;
1365         } else {
1366                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1367                 key.offset = root_objectid;
1368         }
1369
1370         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1371         btrfs_release_path(path);
1372         return ret;
1373 }
1374
1375 static inline int extent_ref_type(u64 parent, u64 owner)
1376 {
1377         int type;
1378         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1379                 if (parent > 0)
1380                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1381                 else
1382                         type = BTRFS_TREE_BLOCK_REF_KEY;
1383         } else {
1384                 if (parent > 0)
1385                         type = BTRFS_SHARED_DATA_REF_KEY;
1386                 else
1387                         type = BTRFS_EXTENT_DATA_REF_KEY;
1388         }
1389         return type;
1390 }
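/*
 * The mapping above, as a table (for reference):
 *
 *	tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set
 *				-> BTRFS_SHARED_BLOCK_REF_KEY
 *	tree block, parent zero	-> BTRFS_TREE_BLOCK_REF_KEY
 *	data (owner >= BTRFS_FIRST_FREE_OBJECTID), parent set
 *				-> BTRFS_SHARED_DATA_REF_KEY
 *	data, parent zero	-> BTRFS_EXTENT_DATA_REF_KEY
 *
 * A nonzero parent means the ref is keyed on the bytenr of the tree
 * block that points at the extent (a "shared" ref) rather than on the
 * owning root.
 */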
1391
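/*
 * Starting at the given level, walk up the path and return in *key the
 * key that follows the current slot at the lowest level that still has
 * a next item.  Returns 0 if such a key exists, 1 if the path points
 * at the last item of the tree.
 */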
1392 static int find_next_key(struct btrfs_path *path, int level,
1393                          struct btrfs_key *key)
1395 {
1396         for (; level < BTRFS_MAX_LEVEL; level++) {
1397                 if (!path->nodes[level])
1398                         break;
1399                 if (path->slots[level] + 1 >=
1400                     btrfs_header_nritems(path->nodes[level]))
1401                         continue;
1402                 if (level == 0)
1403                         btrfs_item_key_to_cpu(path->nodes[level], key,
1404                                               path->slots[level] + 1);
1405                 else
1406                         btrfs_node_key_to_cpu(path->nodes[level], key,
1407                                               path->slots[level] + 1);
1408                 return 0;
1409         }
1410         return 1;
1411 }
1412
1413 /*
1414  * Look for an inline back ref. If the back ref is found, *ref_ret is
1415  * set to the address of the inline back ref, and 0 is returned.
1416  *
1417  * If the back ref isn't found, *ref_ret is set to the address where it
1418  * should be inserted, and -ENOENT is returned.
1419  *
1420  * If insert is true and there are too many inline back refs, the path
1421  * points to the extent item, and -EAGAIN is returned.
1422  *
1423  * NOTE: inline back refs are ordered in the same way that back ref
1424  *       items in the tree are ordered.
1425  */
1426 static noinline_for_stack
1427 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1428                                  struct btrfs_root *root,
1429                                  struct btrfs_path *path,
1430                                  struct btrfs_extent_inline_ref **ref_ret,
1431                                  u64 bytenr, u64 num_bytes,
1432                                  u64 parent, u64 root_objectid,
1433                                  u64 owner, u64 offset, int insert)
1434 {
1435         struct btrfs_key key;
1436         struct extent_buffer *leaf;
1437         struct btrfs_extent_item *ei;
1438         struct btrfs_extent_inline_ref *iref;
1439         u64 flags;
1440         u64 item_size;
1441         unsigned long ptr;
1442         unsigned long end;
1443         int extra_size;
1444         int type;
1445         int want;
1446         int ret;
1447         int err = 0;
1448
1449         key.objectid = bytenr;
1450         key.type = BTRFS_EXTENT_ITEM_KEY;
1451         key.offset = num_bytes;
1452
1453         want = extent_ref_type(parent, owner);
1454         if (insert) {
1455                 extra_size = btrfs_extent_inline_ref_size(want);
1456                 path->keep_locks = 1;
1457         } else
1458                 extra_size = -1;
1459         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1460         if (ret < 0) {
1461                 err = ret;
1462                 goto out;
1463         }
1464         if (ret && !insert) {
1465                 err = -ENOENT;
1466                 goto out;
1467         }
1468         BUG_ON(ret); /* Corruption */
1469
1470         leaf = path->nodes[0];
1471         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1472 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1473         if (item_size < sizeof(*ei)) {
1474                 if (!insert) {
1475                         err = -ENOENT;
1476                         goto out;
1477                 }
1478                 ret = convert_extent_item_v0(trans, root, path, owner,
1479                                              extra_size);
1480                 if (ret < 0) {
1481                         err = ret;
1482                         goto out;
1483                 }
1484                 leaf = path->nodes[0];
1485                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1486         }
1487 #endif
1488         BUG_ON(item_size < sizeof(*ei));
1489
1490         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1491         flags = btrfs_extent_flags(leaf, ei);
1492
1493         ptr = (unsigned long)(ei + 1);
1494         end = (unsigned long)ei + item_size;
1495
1496         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1497                 ptr += sizeof(struct btrfs_tree_block_info);
1498                 BUG_ON(ptr > end);
1499         } else {
1500                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1501         }
1502
1503         err = -ENOENT;
1504         while (1) {
1505                 if (ptr >= end) {
1506                         WARN_ON(ptr > end);
1507                         break;
1508                 }
1509                 iref = (struct btrfs_extent_inline_ref *)ptr;
1510                 type = btrfs_extent_inline_ref_type(leaf, iref);
1511                 if (want < type)
1512                         break;
1513                 if (want > type) {
1514                         ptr += btrfs_extent_inline_ref_size(type);
1515                         continue;
1516                 }
1517
1518                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1519                         struct btrfs_extent_data_ref *dref;
1520                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1521                         if (match_extent_data_ref(leaf, dref, root_objectid,
1522                                                   owner, offset)) {
1523                                 err = 0;
1524                                 break;
1525                         }
1526                         if (hash_extent_data_ref_item(leaf, dref) <
1527                             hash_extent_data_ref(root_objectid, owner, offset))
1528                                 break;
1529                 } else {
1530                         u64 ref_offset;
1531                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1532                         if (parent > 0) {
1533                                 if (parent == ref_offset) {
1534                                         err = 0;
1535                                         break;
1536                                 }
1537                                 if (ref_offset < parent)
1538                                         break;
1539                         } else {
1540                                 if (root_objectid == ref_offset) {
1541                                         err = 0;
1542                                         break;
1543                                 }
1544                                 if (ref_offset < root_objectid)
1545                                         break;
1546                         }
1547                 }
1548                 ptr += btrfs_extent_inline_ref_size(type);
1549         }
1550         if (err == -ENOENT && insert) {
1551                 if (item_size + extra_size >=
1552                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1553                         err = -EAGAIN;
1554                         goto out;
1555                 }
1556                 /*
1557                  * To add a new inline back ref, we have to make sure
1558                  * there is no corresponding back ref item.
1559                  * For simplicity, we just do not add a new inline back
1560                  * ref if there is any kind of item for this block.
1561                  */
1562                 if (find_next_key(path, 0, &key) == 0 &&
1563                     key.objectid == bytenr &&
1564                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1565                         err = -EAGAIN;
1566                         goto out;
1567                 }
1568         }
1569         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1570 out:
1571         if (insert) {
1572                 path->keep_locks = 0;
1573                 btrfs_unlock_up_safe(path, 1);
1574         }
1575         return err;
1576 }
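/*
 * A minimal caller sketch for the contract above (illustrative only;
 * insert_inline_extent_backref() below is the real in-tree user):
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref,
 *					   bytenr, num_bytes, parent,
 *					   root_objectid, owner, offset, 1);
 *	if (ret == 0)
 *		update the existing inline ref through iref;
 *	else if (ret == -ENOENT)
 *		insert a new inline ref at iref;
 *	else if (ret == -EAGAIN)
 *		fall back to a keyed back ref item;
 */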
1577
1578 /*
1579  * helper to add a new inline back ref
1580  */
1581 static noinline_for_stack
1582 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1583                                  struct btrfs_root *root,
1584                                  struct btrfs_path *path,
1585                                  struct btrfs_extent_inline_ref *iref,
1586                                  u64 parent, u64 root_objectid,
1587                                  u64 owner, u64 offset, int refs_to_add,
1588                                  struct btrfs_delayed_extent_op *extent_op)
1589 {
1590         struct extent_buffer *leaf;
1591         struct btrfs_extent_item *ei;
1592         unsigned long ptr;
1593         unsigned long end;
1594         unsigned long item_offset;
1595         u64 refs;
1596         int size;
1597         int type;
1598
1599         leaf = path->nodes[0];
1600         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1601         item_offset = (unsigned long)iref - (unsigned long)ei;
1602
1603         type = extent_ref_type(parent, owner);
1604         size = btrfs_extent_inline_ref_size(type);
1605
1606         btrfs_extend_item(trans, root, path, size);
1607
1608         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1609         refs = btrfs_extent_refs(leaf, ei);
1610         refs += refs_to_add;
1611         btrfs_set_extent_refs(leaf, ei, refs);
1612         if (extent_op)
1613                 __run_delayed_extent_op(extent_op, leaf, ei);
1614
1615         ptr = (unsigned long)ei + item_offset;
1616         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1617         if (ptr < end - size)
1618                 memmove_extent_buffer(leaf, ptr + size, ptr,
1619                                       end - size - ptr);
1620
1621         iref = (struct btrfs_extent_inline_ref *)ptr;
1622         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1623         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1624                 struct btrfs_extent_data_ref *dref;
1625                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1626                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1627                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1628                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1629                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1630         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1631                 struct btrfs_shared_data_ref *sref;
1632                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1633                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1634                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1635         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1636                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1637         } else {
1638                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1639         }
1640         btrfs_mark_buffer_dirty(leaf);
1641 }
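/*
 * For reference, an extent item with inline back refs is laid out as:
 *
 *	[btrfs_extent_item][(btrfs_tree_block_info)][ref 0][ref 1]...[ref N]
 *
 * where the btrfs_tree_block_info is present only for tree blocks.
 * setup_inline_extent_backref() grows the item by one ref size and
 * memmoves the tail upward to open a gap at the sorted position that
 * lookup_inline_extent_backref() found.
 */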
1642
1643 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1644                                  struct btrfs_root *root,
1645                                  struct btrfs_path *path,
1646                                  struct btrfs_extent_inline_ref **ref_ret,
1647                                  u64 bytenr, u64 num_bytes, u64 parent,
1648                                  u64 root_objectid, u64 owner, u64 offset)
1649 {
1650         int ret;
1651
1652         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1653                                            bytenr, num_bytes, parent,
1654                                            root_objectid, owner, offset, 0);
1655         if (ret != -ENOENT)
1656                 return ret;
1657
1658         btrfs_release_path(path);
1659         *ref_ret = NULL;
1660
1661         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1662                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1663                                             root_objectid);
1664         } else {
1665                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1666                                              root_objectid, owner, offset);
1667         }
1668         return ret;
1669 }
1670
1671 /*
1672  * helper to update/remove inline back ref
1673  */
1674 static noinline_for_stack
1675 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1676                                   struct btrfs_root *root,
1677                                   struct btrfs_path *path,
1678                                   struct btrfs_extent_inline_ref *iref,
1679                                   int refs_to_mod,
1680                                   struct btrfs_delayed_extent_op *extent_op)
1681 {
1682         struct extent_buffer *leaf;
1683         struct btrfs_extent_item *ei;
1684         struct btrfs_extent_data_ref *dref = NULL;
1685         struct btrfs_shared_data_ref *sref = NULL;
1686         unsigned long ptr;
1687         unsigned long end;
1688         u32 item_size;
1689         int size;
1690         int type;
1691         u64 refs;
1692
1693         leaf = path->nodes[0];
1694         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1695         refs = btrfs_extent_refs(leaf, ei);
1696         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1697         refs += refs_to_mod;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         type = btrfs_extent_inline_ref_type(leaf, iref);
1703
1704         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1705                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706                 refs = btrfs_extent_data_ref_count(leaf, dref);
1707         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1708                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1709                 refs = btrfs_shared_data_ref_count(leaf, sref);
1710         } else {
1711                 refs = 1;
1712                 BUG_ON(refs_to_mod != -1);
1713         }
1714
1715         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1716         refs += refs_to_mod;
1717
1718         if (refs > 0) {
1719                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1720                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1721                 else
1722                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1723         } else {
1724                 size = btrfs_extent_inline_ref_size(type);
1725                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1726                 ptr = (unsigned long)iref;
1727                 end = (unsigned long)ei + item_size;
1728                 if (ptr + size < end)
1729                         memmove_extent_buffer(leaf, ptr, ptr + size,
1730                                               end - ptr - size);
1731                 item_size -= size;
1732                 btrfs_truncate_item(trans, root, path, item_size, 1);
1733         }
1734         btrfs_mark_buffer_dirty(leaf);
1735 }
1736
1737 static noinline_for_stack
1738 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1739                                  struct btrfs_root *root,
1740                                  struct btrfs_path *path,
1741                                  u64 bytenr, u64 num_bytes, u64 parent,
1742                                  u64 root_objectid, u64 owner,
1743                                  u64 offset, int refs_to_add,
1744                                  struct btrfs_delayed_extent_op *extent_op)
1745 {
1746         struct btrfs_extent_inline_ref *iref;
1747         int ret;
1748
1749         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1750                                            bytenr, num_bytes, parent,
1751                                            root_objectid, owner, offset, 1);
1752         if (ret == 0) {
1753                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1754                 update_inline_extent_backref(trans, root, path, iref,
1755                                              refs_to_add, extent_op);
1756         } else if (ret == -ENOENT) {
1757                 setup_inline_extent_backref(trans, root, path, iref, parent,
1758                                             root_objectid, owner, offset,
1759                                             refs_to_add, extent_op);
1760                 ret = 0;
1761         }
1762         return ret;
1763 }
1764
1765 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1766                                  struct btrfs_root *root,
1767                                  struct btrfs_path *path,
1768                                  u64 bytenr, u64 parent, u64 root_objectid,
1769                                  u64 owner, u64 offset, int refs_to_add)
1770 {
1771         int ret;
1772         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1773                 BUG_ON(refs_to_add != 1);
1774                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1775                                             parent, root_objectid);
1776         } else {
1777                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1778                                              parent, root_objectid,
1779                                              owner, offset, refs_to_add);
1780         }
1781         return ret;
1782 }
1783
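/*
 * Drop refs_to_drop references.  An inline ref is shrunk in place via
 * update_inline_extent_backref(); a keyed data ref has its count
 * decremented (or its item deleted) by remove_extent_data_ref(); a
 * keyed tree block ref always carries a single reference, so its item
 * is simply deleted.
 */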
1784 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1785                                  struct btrfs_root *root,
1786                                  struct btrfs_path *path,
1787                                  struct btrfs_extent_inline_ref *iref,
1788                                  int refs_to_drop, int is_data)
1789 {
1790         int ret = 0;
1791
1792         BUG_ON(!is_data && refs_to_drop != 1);
1793         if (iref) {
1794                 update_inline_extent_backref(trans, root, path, iref,
1795                                              -refs_to_drop, NULL);
1796         } else if (is_data) {
1797                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1798         } else {
1799                 ret = btrfs_del_item(trans, root, path);
1800         }
1801         return ret;
1802 }
1803
1804 static int btrfs_issue_discard(struct block_device *bdev,
1805                                 u64 start, u64 len)
1806 {
1807         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1808 }
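/*
 * blkdev_issue_discard() takes 512-byte sectors, hence the >> 9 above.
 * For example, start = 4MiB and len = 1MiB become sector 8192 and a
 * count of 2048 sectors.
 */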
1809
1810 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1811                                 u64 num_bytes, u64 *actual_bytes)
1812 {
1813         int ret;
1814         u64 discarded_bytes = 0;
1815         struct btrfs_bio *bbio = NULL;
1816
1818         /* Tell the block device(s) that the sectors can be discarded */
1819         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1820                               bytenr, &num_bytes, &bbio, 0);
1821         /* Error condition is -ENOMEM */
1822         if (!ret) {
1823                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1824                 int i;
1825
1827                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1828                         if (!stripe->dev->can_discard)
1829                                 continue;
1830
1831                         ret = btrfs_issue_discard(stripe->dev->bdev,
1832                                                   stripe->physical,
1833                                                   stripe->length);
1834                         if (!ret)
1835                                 discarded_bytes += stripe->length;
1836                         else if (ret != -EOPNOTSUPP)
1837                                 break; /* Logic errors or -ENOMEM, or -EIO, though it's unclear how -EIO could happen here. (JDM) */
1838
1839                         /*
1840                         /*
1841                          * In case we get back EOPNOTSUPP for some reason,
1842                          * ignore the return value so we don't screw up
1843                          * callers of discard_extent.
1844                          */
1845                 }
1846                 kfree(bbio);
1847         }
1848
1849         if (actual_bytes)
1850                 *actual_bytes = discarded_bytes;
1851
1853         return ret;
1854 }
1855
1856 /* Can return -ENOMEM */
1857 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1858                          struct btrfs_root *root,
1859                          u64 bytenr, u64 num_bytes, u64 parent,
1860                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1861 {
1862         int ret;
1863         struct btrfs_fs_info *fs_info = root->fs_info;
1864
1865         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1866                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1867
1868         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1869                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1870                                         num_bytes,
1871                                         parent, root_objectid, (int)owner,
1872                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1873         } else {
1874                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1875                                         num_bytes,
1876                                         parent, root_objectid, owner, offset,
1877                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1878         }
1879         return ret;
1880 }
1881
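/*
 * Add refs_to_add references to the extent at bytenr.  First try to
 * stuff an inline back ref into the extent item; on -EAGAIN (no room
 * left, or a keyed item already exists for this block), bump the ref
 * count on the extent item here and insert a keyed back ref item
 * instead.
 */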
1882 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1883                                   struct btrfs_root *root,
1884                                   u64 bytenr, u64 num_bytes,
1885                                   u64 parent, u64 root_objectid,
1886                                   u64 owner, u64 offset, int refs_to_add,
1887                                   struct btrfs_delayed_extent_op *extent_op)
1888 {
1889         struct btrfs_path *path;
1890         struct extent_buffer *leaf;
1891         struct btrfs_extent_item *item;
1892         u64 refs;
1893         int ret;
1894         int err = 0;
1895
1896         path = btrfs_alloc_path();
1897         if (!path)
1898                 return -ENOMEM;
1899
1900         path->reada = 1;
1901         path->leave_spinning = 1;
1902         /* this will set up the path even if it fails to insert the back ref */
1903         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1904                                            path, bytenr, num_bytes, parent,
1905                                            root_objectid, owner, offset,
1906                                            refs_to_add, extent_op);
1907         if (ret == 0)
1908                 goto out;
1909
1910         if (ret != -EAGAIN) {
1911                 err = ret;
1912                 goto out;
1913         }
1914
1915         leaf = path->nodes[0];
1916         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1917         refs = btrfs_extent_refs(leaf, item);
1918         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1919         if (extent_op)
1920                 __run_delayed_extent_op(extent_op, leaf, item);
1921
1922         btrfs_mark_buffer_dirty(leaf);
1923         btrfs_release_path(path);
1924
1925         path->reada = 1;
1926         path->leave_spinning = 1;
1927
1928         /* now insert the actual backref */
1929         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1930                                     path, bytenr, parent, root_objectid,
1931                                     owner, offset, refs_to_add);
1932         if (ret)
1933                 btrfs_abort_transaction(trans, root, ret);
1934 out:
1935         btrfs_free_path(path);
1936         return err;
1937 }
1938
1939 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1940                                 struct btrfs_root *root,
1941                                 struct btrfs_delayed_ref_node *node,
1942                                 struct btrfs_delayed_extent_op *extent_op,
1943                                 int insert_reserved)
1944 {
1945         int ret = 0;
1946         struct btrfs_delayed_data_ref *ref;
1947         struct btrfs_key ins;
1948         u64 parent = 0;
1949         u64 ref_root = 0;
1950         u64 flags = 0;
1951
1952         ins.objectid = node->bytenr;
1953         ins.offset = node->num_bytes;
1954         ins.type = BTRFS_EXTENT_ITEM_KEY;
1955
1956         ref = btrfs_delayed_node_to_data_ref(node);
1957         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1958                 parent = ref->parent;
1959         else
1960                 ref_root = ref->root;
1961
1962         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1963                 if (extent_op) {
1964                         BUG_ON(extent_op->update_key);
1965                         flags |= extent_op->flags_to_set;
1966                 }
1967                 ret = alloc_reserved_file_extent(trans, root,
1968                                                  parent, ref_root, flags,
1969                                                  ref->objectid, ref->offset,
1970                                                  &ins, node->ref_mod);
1971         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1972                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1973                                              node->num_bytes, parent,
1974                                              ref_root, ref->objectid,
1975                                              ref->offset, node->ref_mod,
1976                                              extent_op);
1977         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1978                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1979                                           node->num_bytes, parent,
1980                                           ref_root, ref->objectid,
1981                                           ref->offset, node->ref_mod,
1982                                           extent_op);
1983         } else {
1984                 BUG();
1985         }
1986         return ret;
1987 }
1988
1989 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1990                                     struct extent_buffer *leaf,
1991                                     struct btrfs_extent_item *ei)
1992 {
1993         u64 flags = btrfs_extent_flags(leaf, ei);
1994         if (extent_op->update_flags) {
1995                 flags |= extent_op->flags_to_set;
1996                 btrfs_set_extent_flags(leaf, ei, flags);
1997         }
1998
1999         if (extent_op->update_key) {
2000                 struct btrfs_tree_block_info *bi;
2001                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2002                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2003                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2004         }
2005 }
2006
2007 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2008                                  struct btrfs_root *root,
2009                                  struct btrfs_delayed_ref_node *node,
2010                                  struct btrfs_delayed_extent_op *extent_op)
2011 {
2012         struct btrfs_key key;
2013         struct btrfs_path *path;
2014         struct btrfs_extent_item *ei;
2015         struct extent_buffer *leaf;
2016         u32 item_size;
2017         int ret;
2018         int err = 0;
2019
2020         if (trans->aborted)
2021                 return 0;
2022
2023         path = btrfs_alloc_path();
2024         if (!path)
2025                 return -ENOMEM;
2026
2027         key.objectid = node->bytenr;
2028         key.type = BTRFS_EXTENT_ITEM_KEY;
2029         key.offset = node->num_bytes;
2030
2031         path->reada = 1;
2032         path->leave_spinning = 1;
2033         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2034                                 path, 0, 1);
2035         if (ret < 0) {
2036                 err = ret;
2037                 goto out;
2038         }
2039         if (ret > 0) {
2040                 err = -EIO;
2041                 goto out;
2042         }
2043
2044         leaf = path->nodes[0];
2045         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2046 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2047         if (item_size < sizeof(*ei)) {
2048                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2049                                              path, (u64)-1, 0);
2050                 if (ret < 0) {
2051                         err = ret;
2052                         goto out;
2053                 }
2054                 leaf = path->nodes[0];
2055                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2056         }
2057 #endif
2058         BUG_ON(item_size < sizeof(*ei));
2059         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2060         __run_delayed_extent_op(extent_op, leaf, ei);
2061
2062         btrfs_mark_buffer_dirty(leaf);
2063 out:
2064         btrfs_free_path(path);
2065         return err;
2066 }
2067
2068 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2069                                 struct btrfs_root *root,
2070                                 struct btrfs_delayed_ref_node *node,
2071                                 struct btrfs_delayed_extent_op *extent_op,
2072                                 int insert_reserved)
2073 {
2074         int ret = 0;
2075         struct btrfs_delayed_tree_ref *ref;
2076         struct btrfs_key ins;
2077         u64 parent = 0;
2078         u64 ref_root = 0;
2079
2080         ins.objectid = node->bytenr;
2081         ins.offset = node->num_bytes;
2082         ins.type = BTRFS_EXTENT_ITEM_KEY;
2083
2084         ref = btrfs_delayed_node_to_tree_ref(node);
2085         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2086                 parent = ref->parent;
2087         else
2088                 ref_root = ref->root;
2089
2090         BUG_ON(node->ref_mod != 1);
2091         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2092                 BUG_ON(!extent_op || !extent_op->update_flags ||
2093                        !extent_op->update_key);
2094                 ret = alloc_reserved_tree_block(trans, root,
2095                                                 parent, ref_root,
2096                                                 extent_op->flags_to_set,
2097                                                 &extent_op->key,
2098                                                 ref->level, &ins);
2099         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2100                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2101                                              node->num_bytes, parent, ref_root,
2102                                              ref->level, 0, 1, extent_op);
2103         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2104                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2105                                           node->num_bytes, parent, ref_root,
2106                                           ref->level, 0, 1, extent_op);
2107         } else {
2108                 BUG();
2109         }
2110         return ret;
2111 }
2112
2113 /* helper function to actually process a single delayed ref entry */
2114 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2115                                struct btrfs_root *root,
2116                                struct btrfs_delayed_ref_node *node,
2117                                struct btrfs_delayed_extent_op *extent_op,
2118                                int insert_reserved)
2119 {
2120         int ret = 0;
2121
2122         if (trans->aborted)
2123                 return 0;
2124
2125         if (btrfs_delayed_ref_is_head(node)) {
2126                 struct btrfs_delayed_ref_head *head;
2127                 /*
2128                  * we've hit the end of the chain and we were supposed
2129                  * to insert this extent into the tree.  But it got
2130                  * deleted before we ever needed to insert it, so all
2131                  * we have to do is clean up the accounting.
2132                  */
2133                 BUG_ON(extent_op);
2134                 head = btrfs_delayed_node_to_head(node);
2135                 if (insert_reserved) {
2136                         btrfs_pin_extent(root, node->bytenr,
2137                                          node->num_bytes, 1);
2138                         if (head->is_data) {
2139                                 ret = btrfs_del_csums(trans, root,
2140                                                       node->bytenr,
2141                                                       node->num_bytes);
2142                         }
2143                 }
2144                 return ret;
2145         }
2146
2147         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2148             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2149                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2150                                            insert_reserved);
2151         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2152                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2153                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2154                                            insert_reserved);
2155         else
2156                 BUG();
2157         return ret;
2158 }
2159
2160 static noinline struct btrfs_delayed_ref_node *
2161 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2162 {
2163         struct rb_node *node;
2164         struct btrfs_delayed_ref_node *ref;
2165         int action = BTRFS_ADD_DELAYED_REF;
2166 again:
2167         /*
2168          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2169          * This prevents the ref count from going down to zero while
2170          * there are still pending delayed refs.
2171          */
2172         node = rb_prev(&head->node.rb_node);
2173         while (1) {
2174                 if (!node)
2175                         break;
2176                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2177                                 rb_node);
2178                 if (ref->bytenr != head->node.bytenr)
2179                         break;
2180                 if (ref->action == action)
2181                         return ref;
2182                 node = rb_prev(node);
2183         }
2184         if (action == BTRFS_ADD_DELAYED_REF) {
2185                 action = BTRFS_DROP_DELAYED_REF;
2186                 goto again;
2187         }
2188         return NULL;
2189 }
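/*
 * Example: if a head has the pending refs [DROP, ADD, DROP], the ADD
 * is handed out first; DROPs are only returned once no ADD refs
 * remain.  Running a DROP ahead of its matching ADD could take the
 * extent's ref count to zero and free it while references are still
 * pending.
 */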
2190
2191 /*
2192  * Returns the number of refs processed, or a negative errno (-ENOMEM
2193  * or -EIO) on failure, in which case the caller aborts the transaction.
2194  */
2195 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2196                                        struct btrfs_root *root,
2197                                        struct list_head *cluster)
2198 {
2199         struct btrfs_delayed_ref_root *delayed_refs;
2200         struct btrfs_delayed_ref_node *ref;
2201         struct btrfs_delayed_ref_head *locked_ref = NULL;
2202         struct btrfs_delayed_extent_op *extent_op;
2203         struct btrfs_fs_info *fs_info = root->fs_info;
2204         int ret;
2205         int count = 0;
2206         int must_insert_reserved = 0;
2207
2208         delayed_refs = &trans->transaction->delayed_refs;
2209         while (1) {
2210                 if (!locked_ref) {
2211                         /* pick a new head ref from the cluster list */
2212                         if (list_empty(cluster))
2213                                 break;
2214
2215                         locked_ref = list_entry(cluster->next,
2216                                      struct btrfs_delayed_ref_head, cluster);
2217
2218                         /* grab the lock that says we are going to process
2219                          * all the refs for this head */
2220                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2221
2222                         /*
2223                          * we may have dropped the spin lock to get the head
2224                          * mutex lock, and that might have given someone else
2225                          * time to free the head.  If that's true, it has been
2226                          * removed from our list and we can move on.
2227                          */
2228                         if (ret == -EAGAIN) {
2229                                 locked_ref = NULL;
2230                                 count++;
2231                                 continue;
2232                         }
2233                 }
2234
2235                 /*
2236                  * We need to try and merge add/drops of the same ref since we
2237                  * can run into issues with relocate dropping the implicit ref
2238                  * and then it being added back again before the drop can
2239                  * finish.  If we merged anything we need to re-loop so we can
2240                  * get a good ref.
2241                  */
2242                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2243                                          locked_ref);
2244
2245                 /*
2246                  * locked_ref is the head node, so we have to go one
2247                  * node back for any delayed ref updates
2248                  */
2249                 ref = select_delayed_ref(locked_ref);
2250
2251                 if (ref && ref->seq &&
2252                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2253                         /*
2254                          * there are still refs with lower seq numbers in the
2255                          * process of being added. Don't run this ref yet.
2256                          */
2257                         list_del_init(&locked_ref->cluster);
2258                         btrfs_delayed_ref_unlock(locked_ref);
2259                         locked_ref = NULL;
2260                         delayed_refs->num_heads_ready++;
2261                         spin_unlock(&delayed_refs->lock);
2262                         cond_resched();
2263                         spin_lock(&delayed_refs->lock);
2264                         continue;
2265                 }
2266
2267                 /*
2268                  * record the must insert reserved flag before we
2269                  * drop the spin lock.
2270                  */
2271                 must_insert_reserved = locked_ref->must_insert_reserved;
2272                 locked_ref->must_insert_reserved = 0;
2273
2274                 extent_op = locked_ref->extent_op;
2275                 locked_ref->extent_op = NULL;
2276
2277                 if (!ref) {
2278                         /* All delayed refs have been processed; go ahead
2279                          * and send the head node to run_one_delayed_ref,
2280                          * so that any accounting fixes can happen.
2281                          */
2282                         ref = &locked_ref->node;
2283
2284                         if (extent_op && must_insert_reserved) {
2285                                 btrfs_free_delayed_extent_op(extent_op);
2286                                 extent_op = NULL;
2287                         }
2288
2289                         if (extent_op) {
2290                                 spin_unlock(&delayed_refs->lock);
2291
2292                                 ret = run_delayed_extent_op(trans, root,
2293                                                             ref, extent_op);
2294                                 btrfs_free_delayed_extent_op(extent_op);
2295
2296                                 if (ret) {
2297                                         printk(KERN_DEBUG
2298                                                "btrfs: run_delayed_extent_op "
2299                                                "returned %d\n", ret);
2300                                         spin_lock(&delayed_refs->lock);
2301                                         btrfs_delayed_ref_unlock(locked_ref);
2302                                         return ret;
2303                                 }
2304
2305                                 goto next;
2306                         }
2307                 }
2308
2309                 ref->in_tree = 0;
2310                 rb_erase(&ref->rb_node, &delayed_refs->root);
2311                 delayed_refs->num_entries--;
2312                 if (!btrfs_delayed_ref_is_head(ref)) {
2313                         /*
2314                          * when we play the delayed ref, also correct the
2315                          * ref_mod on head
2316                          */
2317                         switch (ref->action) {
2318                         case BTRFS_ADD_DELAYED_REF:
2319                         case BTRFS_ADD_DELAYED_EXTENT:
2320                                 locked_ref->node.ref_mod -= ref->ref_mod;
2321                                 break;
2322                         case BTRFS_DROP_DELAYED_REF:
2323                                 locked_ref->node.ref_mod += ref->ref_mod;
2324                                 break;
2325                         default:
2326                                 WARN_ON(1);
2327                         }
2328                 }
2329                 spin_unlock(&delayed_refs->lock);
2330
2331                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2332                                           must_insert_reserved);
2333
2334                 btrfs_free_delayed_extent_op(extent_op);
2335                 if (ret) {
2336                         btrfs_delayed_ref_unlock(locked_ref);
2337                         btrfs_put_delayed_ref(ref);
2338                         printk(KERN_DEBUG
2339                                "btrfs: run_one_delayed_ref returned %d\n", ret);
2340                         spin_lock(&delayed_refs->lock);
2341                         return ret;
2342                 }
2343
2344                 /*
2345                  * If this node is a head, that means all the refs in this head
2346                  * have been dealt with, and we will pick the next head to deal
2347                  * with, so we must unlock the head and drop it from the cluster
2348                  * list before we release it.
2349                  */
2350                 if (btrfs_delayed_ref_is_head(ref)) {
2351                         list_del_init(&locked_ref->cluster);
2352                         btrfs_delayed_ref_unlock(locked_ref);
2353                         locked_ref = NULL;
2354                 }
2355                 btrfs_put_delayed_ref(ref);
2356                 count++;
2357 next:
2358                 cond_resched();
2359                 spin_lock(&delayed_refs->lock);
2360         }
2361         return count;
2362 }
2363
2364 #ifdef SCRAMBLE_DELAYED_REFS
2365 /*
2366  * Normally delayed refs get processed in ascending bytenr order, which
2367  * in most cases matches the order they were added. To expose dependencies
2368  * on this order, we start processing the tree in the middle instead.
2369  */
2370 static u64 find_middle(struct rb_root *root)
2371 {
2372         struct rb_node *n = root->rb_node;
2373         struct btrfs_delayed_ref_node *entry;
2374         int alt = 1;
2375         u64 middle;
2376         u64 first = 0, last = 0;
2377
2378         n = rb_first(root);
2379         if (n) {
2380                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2381                 first = entry->bytenr;
2382         }
2383         n = rb_last(root);
2384         if (n) {
2385                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2386                 last = entry->bytenr;
2387         }
2388         n = root->rb_node;
2389
2390         while (n) {
2391                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2392                 WARN_ON(!entry->in_tree);
2393
2394                 middle = entry->bytenr;
2395
2396                 if (alt)
2397                         n = n->rb_left;
2398                 else
2399                         n = n->rb_right;
2400
2401                 alt = 1 - alt;
2402         }
2403         return middle;
2404 }
2405 #endif
2406
2407 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2408                                          struct btrfs_fs_info *fs_info)
2409 {
2410         struct qgroup_update *qgroup_update;
2411         int ret = 0;
2412
2413         if (list_empty(&trans->qgroup_ref_list) !=
2414             !trans->delayed_ref_elem.seq) {
2415                 /* list without seq or seq without list */
2416                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2417                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2418                         trans->delayed_ref_elem.seq);
2419                 BUG();
2420         }
2421
2422         if (!trans->delayed_ref_elem.seq)
2423                 return 0;
2424
2425         while (!list_empty(&trans->qgroup_ref_list)) {
2426                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2427                                                  struct qgroup_update, list);
2428                 list_del(&qgroup_update->list);
2429                 if (!ret)
2430                         ret = btrfs_qgroup_account_ref(
2431                                         trans, fs_info, qgroup_update->node,
2432                                         qgroup_update->extent_op);
2433                 kfree(qgroup_update);
2434         }
2435
2436         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2437
2438         return ret;
2439 }
2440
2441 /*
2442  * This starts processing the delayed reference count updates and
2443  * extent insertions we have queued up so far.  count can be 0, which
2444  * means to process everything in the tree at the start of the run
2445  * (but not newly added entries), or it can be some target number of
2446  * entries you'd like to process.
2447  *
2448  * Returns 0 on success or if called with an aborted transaction.
2449  * Returns <0 on error and aborts the transaction.
2450  */
2451 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2452                            struct btrfs_root *root, unsigned long count)
2453 {
2454         struct rb_node *node;
2455         struct btrfs_delayed_ref_root *delayed_refs;
2456         struct btrfs_delayed_ref_node *ref;
2457         struct list_head cluster;
2458         int ret;
2459         u64 delayed_start;
2460         int run_all = count == (unsigned long)-1;
2461         int run_most = 0;
2462         int loops;
2463
2464         /* We'll clean this up in btrfs_cleanup_transaction */
2465         if (trans->aborted)
2466                 return 0;
2467
2468         if (root == root->fs_info->extent_root)
2469                 root = root->fs_info->tree_root;
2470
2471         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2472
2473         delayed_refs = &trans->transaction->delayed_refs;
2474         INIT_LIST_HEAD(&cluster);
2475 again:
2476         loops = 0;
2477         spin_lock(&delayed_refs->lock);
2478
2479 #ifdef SCRAMBLE_DELAYED_REFS
2480         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2481 #endif
2482
2483         if (count == 0) {
2484                 count = delayed_refs->num_entries * 2;
2485                 run_most = 1;
2486         }
2487         while (1) {
2488                 if (!(run_all || run_most) &&
2489                     delayed_refs->num_heads_ready < 64)
2490                         break;
2491
2492                 /*
2493                  * go find something we can process in the rbtree.  We start at
2494                  * the beginning of the tree, and then build a cluster
2495                  * of refs to process starting at the first one we are able to
2496                  * lock
2497                  */
2498                 delayed_start = delayed_refs->run_delayed_start;
2499                 ret = btrfs_find_ref_cluster(trans, &cluster,
2500                                              delayed_refs->run_delayed_start);
2501                 if (ret)
2502                         break;
2503
2504                 ret = run_clustered_refs(trans, root, &cluster);
2505                 if (ret < 0) {
2506                         btrfs_release_ref_cluster(&cluster);
2507                         spin_unlock(&delayed_refs->lock);
2508                         btrfs_abort_transaction(trans, root, ret);
2509                         return ret;
2510                 }
2511
2512                 count -= min_t(unsigned long, ret, count);
2513
2514                 if (count == 0)
2515                         break;
2516
2517                 if (delayed_start >= delayed_refs->run_delayed_start) {
2518                         if (loops == 0) {
2519                                 /*
2520                                  * btrfs_find_ref_cluster looped. Let's do one
2521                                  * more cycle. If we don't run any delayed refs
2522                                  * during that cycle (because all of them are
2523                                  * blocked), bail out.
2524                                  */
2525                                 loops = 1;
2526                         } else {
2527                                 /*
2528                                  * no runnable refs left, stop trying
2529                                  */
2530                                 BUG_ON(run_all);
2531                                 break;
2532                         }
2533                 }
2534                 if (ret) {
2535                         /* refs were run, let's reset staleness detection */
2536                         loops = 0;
2537                 }
2538         }
2539
2540         if (run_all) {
2541                 if (!list_empty(&trans->new_bgs)) {
2542                         spin_unlock(&delayed_refs->lock);
2543                         btrfs_create_pending_block_groups(trans, root);
2544                         spin_lock(&delayed_refs->lock);
2545                 }
2546
2547                 node = rb_first(&delayed_refs->root);
2548                 if (!node)
2549                         goto out;
2550                 count = (unsigned long)-1;
2551
2552                 while (node) {
2553                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2554                                        rb_node);
2555                         if (btrfs_delayed_ref_is_head(ref)) {
2556                                 struct btrfs_delayed_ref_head *head;
2557
2558                                 head = btrfs_delayed_node_to_head(ref);
2559                                 atomic_inc(&ref->refs);
2560
2561                                 spin_unlock(&delayed_refs->lock);
2562                                 /*
2563                                  * Mutex was contended; block until it's
2564                                  * released and try again.
2565                                  */
2566                                 mutex_lock(&head->mutex);
2567                                 mutex_unlock(&head->mutex);
2568
2569                                 btrfs_put_delayed_ref(ref);
2570                                 cond_resched();
2571                                 goto again;
2572                         }
2573                         node = rb_next(node);
2574                 }
2575                 spin_unlock(&delayed_refs->lock);
2576                 schedule_timeout(1);
2577                 goto again;
2578         }
2579 out:
2580         spin_unlock(&delayed_refs->lock);
2581         assert_qgroups_uptodate(trans);
2582         return 0;
2583 }
2584
2585 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2586                                 struct btrfs_root *root,
2587                                 u64 bytenr, u64 num_bytes, u64 flags,
2588                                 int is_data)
2589 {
2590         struct btrfs_delayed_extent_op *extent_op;
2591         int ret;
2592
2593         extent_op = btrfs_alloc_delayed_extent_op();
2594         if (!extent_op)
2595                 return -ENOMEM;
2596
2597         extent_op->flags_to_set = flags;
2598         extent_op->update_flags = 1;
2599         extent_op->update_key = 0;
2600         extent_op->is_data = is_data ? 1 : 0;
2601
2602         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2603                                           num_bytes, extent_op);
2604         if (ret)
2605                 btrfs_free_delayed_extent_op(extent_op);
2606         return ret;
2607 }
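/*
 * A minimal usage sketch (illustrative values): flag a tree block's
 * extent as carrying a full back ref:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start,
 *					  buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 *
 * The update is queued as a delayed extent op and applied to the
 * extent item by run_delayed_extent_op() when the delayed refs run.
 */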
2608
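/*
 * Look through the delayed refs pending on bytenr.  Returns 0 if the
 * only pending ref is a data ref matching this root/objectid/offset,
 * 1 if the pending refs may reference the extent from elsewhere,
 * -ENOENT if nothing relevant is pending, and -EAGAIN if the head
 * mutex was contended and the caller should retry.
 */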
2609 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2610                                       struct btrfs_root *root,
2611                                       struct btrfs_path *path,
2612                                       u64 objectid, u64 offset, u64 bytenr)
2613 {
2614         struct btrfs_delayed_ref_head *head;
2615         struct btrfs_delayed_ref_node *ref;
2616         struct btrfs_delayed_data_ref *data_ref;
2617         struct btrfs_delayed_ref_root *delayed_refs;
2618         struct rb_node *node;
2619         int ret = 0;
2620
2621         ret = -ENOENT;
2622         delayed_refs = &trans->transaction->delayed_refs;
2623         spin_lock(&delayed_refs->lock);
2624         head = btrfs_find_delayed_ref_head(trans, bytenr);
2625         if (!head)
2626                 goto out;
2627
2628         if (!mutex_trylock(&head->mutex)) {
2629                 atomic_inc(&head->node.refs);
2630                 spin_unlock(&delayed_refs->lock);
2631
2632                 btrfs_release_path(path);
2633
2634                 /*
2635                  * Mutex was contended; block until it's released and let
2636                  * the caller try again.
2637                  */
2638                 mutex_lock(&head->mutex);
2639                 mutex_unlock(&head->mutex);
2640                 btrfs_put_delayed_ref(&head->node);
2641                 return -EAGAIN;
2642         }
2643
2644         node = rb_prev(&head->node.rb_node);
2645         if (!node)
2646                 goto out_unlock;
2647
2648         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2649
2650         if (ref->bytenr != bytenr)
2651                 goto out_unlock;
2652
2653         ret = 1;
2654         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2655                 goto out_unlock;
2656
2657         data_ref = btrfs_delayed_node_to_data_ref(ref);
2658
2659         node = rb_prev(node);
2660         if (node) {
2661                 int seq = ref->seq;
2662
2663                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2664                 if (ref->bytenr == bytenr && ref->seq == seq)
2665                         goto out_unlock;
2666         }
2667
2668         if (data_ref->root != root->root_key.objectid ||
2669             data_ref->objectid != objectid || data_ref->offset != offset)
2670                 goto out_unlock;
2671
2672         ret = 0;
2673 out_unlock:
2674         mutex_unlock(&head->mutex);
2675 out:
2676         spin_unlock(&delayed_refs->lock);
2677         return ret;
2678 }
2679
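/*
 * Check the committed extent tree for references to this data extent.
 * Returns 0 only in the provably exclusive case: the extent item holds
 * exactly one inline EXTENT_DATA_REF, it matches this root, objectid
 * and offset, and the extent is newer than the root's last snapshot.
 * Anything else yields 1 (may be cross-referenced) or -ENOENT.
 */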
2680 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2681                                         struct btrfs_root *root,
2682                                         struct btrfs_path *path,
2683                                         u64 objectid, u64 offset, u64 bytenr)
2684 {
2685         struct btrfs_root *extent_root = root->fs_info->extent_root;
2686         struct extent_buffer *leaf;
2687         struct btrfs_extent_data_ref *ref;
2688         struct btrfs_extent_inline_ref *iref;
2689         struct btrfs_extent_item *ei;
2690         struct btrfs_key key;
2691         u32 item_size;
2692         int ret;
2693
2694         key.objectid = bytenr;
2695         key.offset = (u64)-1;
2696         key.type = BTRFS_EXTENT_ITEM_KEY;
2697
2698         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2699         if (ret < 0)
2700                 goto out;
2701         BUG_ON(ret == 0); /* Corruption */
2702
2703         ret = -ENOENT;
2704         if (path->slots[0] == 0)
2705                 goto out;
2706
2707         path->slots[0]--;
2708         leaf = path->nodes[0];
2709         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2710
2711         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2712                 goto out;
2713
2714         ret = 1;
2715         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2716 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2717         if (item_size < sizeof(*ei)) {
2718                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2719                 goto out;
2720         }
2721 #endif
2722         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2723
2724         if (item_size != sizeof(*ei) +
2725             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2726                 goto out;
2727
2728         if (btrfs_extent_generation(leaf, ei) <=
2729             btrfs_root_last_snapshot(&root->root_item))
2730                 goto out;
2731
2732         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2733         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2734             BTRFS_EXTENT_DATA_REF_KEY)
2735                 goto out;
2736
2737         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2738         if (btrfs_extent_refs(leaf, ei) !=
2739             btrfs_extent_data_ref_count(leaf, ref) ||
2740             btrfs_extent_data_ref_root(leaf, ref) !=
2741             root->root_key.objectid ||
2742             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2743             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2744                 goto out;
2745
2746         ret = 0;
2747 out:
2748         return ret;
2749 }
2750
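/*
 * A return of 0 from the pair of checks below means both the committed
 * extent tree and the delayed refs show the extent as exclusively owned
 * by this root.  Callers treat any non-zero return, including -ENOENT,
 * conservatively as "a cross reference may exist".
 */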
2751 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2752                           struct btrfs_root *root,
2753                           u64 objectid, u64 offset, u64 bytenr)
2754 {
2755         struct btrfs_path *path;
2756         int ret;
2757         int ret2;
2758
2759         path = btrfs_alloc_path();
2760         if (!path)
2761                 return -ENOENT;
2762
2763         do {
2764                 ret = check_committed_ref(trans, root, path, objectid,
2765                                           offset, bytenr);
2766                 if (ret && ret != -ENOENT)
2767                         goto out;
2768
2769                 ret2 = check_delayed_ref(trans, root, path, objectid,
2770                                          offset, bytenr);
2771         } while (ret2 == -EAGAIN);
2772
2773         if (ret2 && ret2 != -ENOENT) {
2774                 ret = ret2;
2775                 goto out;
2776         }
2777
2778         if (ret != -ENOENT || ret2 != -ENOENT)
2779                 ret = 0;
2780 out:
2781         btrfs_free_path(path);
2782         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2783                 WARN_ON(ret > 0);
2784         return ret;
2785 }
2786
2787 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2788                            struct btrfs_root *root,
2789                            struct extent_buffer *buf,
2790                            int full_backref, int inc, int for_cow)
2791 {
2792         u64 bytenr;
2793         u64 num_bytes;
2794         u64 parent;
2795         u64 ref_root;
2796         u32 nritems;
2797         struct btrfs_key key;
2798         struct btrfs_file_extent_item *fi;
2799         int i;
2800         int level;
2801         int ret = 0;
2802         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2803                             u64, u64, u64, u64, u64, u64, int);
2804
2805         ref_root = btrfs_header_owner(buf);
2806         nritems = btrfs_header_nritems(buf);
2807         level = btrfs_header_level(buf);
2808
2809         if (!root->ref_cows && level == 0)
2810                 return 0;
2811
2812         if (inc)
2813                 process_func = btrfs_inc_extent_ref;
2814         else
2815                 process_func = btrfs_free_extent;
2816
2817         if (full_backref)
2818                 parent = buf->start;
2819         else
2820                 parent = 0;
2821
2822         for (i = 0; i < nritems; i++) {
2823                 if (level == 0) {
2824                         btrfs_item_key_to_cpu(buf, &key, i);
2825                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2826                                 continue;
2827                         fi = btrfs_item_ptr(buf, i,
2828                                             struct btrfs_file_extent_item);
2829                         if (btrfs_file_extent_type(buf, fi) ==
2830                             BTRFS_FILE_EXTENT_INLINE)
2831                                 continue;
2832                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2833                         if (bytenr == 0)
2834                                 continue;
2835
2836                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2837                         key.offset -= btrfs_file_extent_offset(buf, fi);
2838                         ret = process_func(trans, root, bytenr, num_bytes,
2839                                            parent, ref_root, key.objectid,
2840                                            key.offset, for_cow);
2841                         if (ret)
2842                                 goto fail;
2843                 } else {
2844                         bytenr = btrfs_node_blockptr(buf, i);
2845                         num_bytes = btrfs_level_size(root, level - 1);
2846                         ret = process_func(trans, root, bytenr, num_bytes,
2847                                            parent, ref_root, level - 1, 0,
2848                                            for_cow);
2849                         if (ret)
2850                                 goto fail;
2851                 }
2852         }
2853         return 0;
2854 fail:
2855         return ret;
2856 }
2857
2858 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2859                   struct extent_buffer *buf, int full_backref, int for_cow)
2860 {
2861         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2862 }
2863
2864 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2865                   struct extent_buffer *buf, int full_backref, int for_cow)
2866 {
2867         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2868 }
2869
2870 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2871                                  struct btrfs_root *root,
2872                                  struct btrfs_path *path,
2873                                  struct btrfs_block_group_cache *cache)
2874 {
2875         int ret;
2876         struct btrfs_root *extent_root = root->fs_info->extent_root;
2877         unsigned long bi;
2878         struct extent_buffer *leaf;
2879
2880         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2881         if (ret < 0)
2882                 goto fail;
2883         BUG_ON(ret); /* Corruption */
2884
2885         leaf = path->nodes[0];
2886         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2887         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2888         btrfs_mark_buffer_dirty(leaf);
2889         btrfs_release_path(path);
2890 fail:
2891         if (ret) {
2892                 btrfs_abort_transaction(trans, root, ret);
2893                 return ret;
2894         }
2895         return 0;
2896
2897 }
2898
2899 static struct btrfs_block_group_cache *
2900 next_block_group(struct btrfs_root *root,
2901                  struct btrfs_block_group_cache *cache)
2902 {
2903         struct rb_node *node;
2904         spin_lock(&root->fs_info->block_group_cache_lock);
2905         node = rb_next(&cache->cache_node);
2906         btrfs_put_block_group(cache);
2907         if (node) {
2908                 cache = rb_entry(node, struct btrfs_block_group_cache,
2909                                  cache_node);
2910                 btrfs_get_block_group(cache);
2911         } else
2912                 cache = NULL;
2913         spin_unlock(&root->fs_info->block_group_cache_lock);
2914         return cache;
2915 }
2916
2917 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2918                             struct btrfs_trans_handle *trans,
2919                             struct btrfs_path *path)
2920 {
2921         struct btrfs_root *root = block_group->fs_info->tree_root;
2922         struct inode *inode = NULL;
2923         u64 alloc_hint = 0;
2924         int dcs = BTRFS_DC_ERROR;
2925         int num_pages = 0;
2926         int retries = 0;
2927         int ret = 0;
2928
2929         /*
2930          * If this block group is smaller than 100 megs, don't bother
2931          * caching it.
2932          */
2933         if (block_group->key.offset < (100 * 1024 * 1024)) {
2934                 spin_lock(&block_group->lock);
2935                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2936                 spin_unlock(&block_group->lock);
2937                 return 0;
2938         }
2939
2940 again:
2941         inode = lookup_free_space_inode(root, block_group, path);
2942         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2943                 ret = PTR_ERR(inode);
2944                 btrfs_release_path(path);
2945                 goto out;
2946         }
2947
2948         if (IS_ERR(inode)) {
2949                 BUG_ON(retries);
2950                 retries++;
2951
2952                 if (block_group->ro)
2953                         goto out_free;
2954
2955                 ret = create_free_space_inode(root, trans, block_group, path);
2956                 if (ret)
2957                         goto out_free;
2958                 goto again;
2959         }
2960
2961         /* We've already set up this transaction, go ahead and exit */
2962         if (block_group->cache_generation == trans->transid &&
2963             i_size_read(inode)) {
2964                 dcs = BTRFS_DC_SETUP;
2965                 goto out_put;
2966         }
2967
2968         /*
2969          * We want to set the generation to 0; that way, if anything goes wrong
2970          * from here on out, we know not to trust this cache the next time we
2971          * load it.
2972          */
2973         BTRFS_I(inode)->generation = 0;
2974         ret = btrfs_update_inode(trans, root, inode);
2975         WARN_ON(ret);
2976
2977         if (i_size_read(inode) > 0) {
2978                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2979                                                       inode);
2980                 if (ret)
2981                         goto out_put;
2982         }
2983
2984         spin_lock(&block_group->lock);
2985         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2986             !btrfs_test_opt(root, SPACE_CACHE)) {
2987                 /*
2988                  * don't bother trying to write stuff out _if_
2989                  * a) we're not cached,
2990                  * b) we're mounted with the nospace_cache option.
2991                  */
2992                 dcs = BTRFS_DC_WRITTEN;
2993                 spin_unlock(&block_group->lock);
2994                 goto out_put;
2995         }
2996         spin_unlock(&block_group->lock);
2997
2998         /*
2999          * Try to preallocate enough space based on how big the block group is.
3000          * Keep in mind this has to include any pinned space which could end up
3001          * taking up quite a bit since it's not folded into the other space
3002          * cache.
3003          */
3004         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3005         if (!num_pages)
3006                 num_pages = 1;
3007
3008         num_pages *= 16;
3009         num_pages *= PAGE_CACHE_SIZE;
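        /*
         * Despite its name, num_pages now holds a byte count: 16 pages
         * per 256MB of block group size.  E.g. a 1GB block group with
         * 4K pages gives 4 * 16 = 64 pages, i.e. 256K preallocated.
         */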
3010
3011         ret = btrfs_check_data_free_space(inode, num_pages);
3012         if (ret)
3013                 goto out_put;
3014
3015         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3016                                               num_pages, num_pages,
3017                                               &alloc_hint);
3018         if (!ret)
3019                 dcs = BTRFS_DC_SETUP;
3020         btrfs_free_reserved_data_space(inode, num_pages);
3021
3022 out_put:
3023         iput(inode);
3024 out_free:
3025         btrfs_release_path(path);
3026 out:
3027         spin_lock(&block_group->lock);
3028         if (!ret && dcs == BTRFS_DC_SETUP)
3029                 block_group->cache_generation = trans->transid;
3030         block_group->disk_cache_state = dcs;
3031         spin_unlock(&block_group->lock);
3032
3033         return ret;
3034 }
3035
3036 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3037                                    struct btrfs_root *root)
3038 {
3039         struct btrfs_block_group_cache *cache;
3040         int err = 0;
3041         struct btrfs_path *path;
3042         u64 last = 0;
3043
3044         path = btrfs_alloc_path();
3045         if (!path)
3046                 return -ENOMEM;
3047
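        /*
         * Three passes: set up free space cache inodes for block groups
         * still in BTRFS_DC_CLEAR, then write out the dirty block group
         * items themselves, and finally write the cache contents for
         * groups left in BTRFS_DC_NEED_WRITE.  A group regressing to an
         * earlier state sends us back to "again".
         */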
3048 again:
3049         while (1) {
3050                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3051                 while (cache) {
3052                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3053                                 break;
3054                         cache = next_block_group(root, cache);
3055                 }
3056                 if (!cache) {
3057                         if (last == 0)
3058                                 break;
3059                         last = 0;
3060                         continue;
3061                 }
3062                 err = cache_save_setup(cache, trans, path);
3063                 last = cache->key.objectid + cache->key.offset;
3064                 btrfs_put_block_group(cache);
3065         }
3066
3067         while (1) {
3068                 if (last == 0) {
3069                         err = btrfs_run_delayed_refs(trans, root,
3070                                                      (unsigned long)-1);
3071                         if (err) /* File system offline */
3072                                 goto out;
3073                 }
3074
3075                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3076                 while (cache) {
3077                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3078                                 btrfs_put_block_group(cache);
3079                                 goto again;
3080                         }
3081
3082                         if (cache->dirty)
3083                                 break;
3084                         cache = next_block_group(root, cache);
3085                 }
3086                 if (!cache) {
3087                         if (last == 0)
3088                                 break;
3089                         last = 0;
3090                         continue;
3091                 }
3092
3093                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3094                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3095                 cache->dirty = 0;
3096                 last = cache->key.objectid + cache->key.offset;
3097
3098                 err = write_one_cache_group(trans, root, path, cache);
3099                 if (err) /* File system offline */
3100                         goto out;
3101
3102                 btrfs_put_block_group(cache);
3103         }
3104
3105         while (1) {
3106                 /*
3107                  * I don't think this is needed since we're just marking our
3108                  * preallocated extent as written, but just in case, it can't
3109                  * hurt.
3110                  */
3111                 if (last == 0) {
3112                         err = btrfs_run_delayed_refs(trans, root,
3113                                                      (unsigned long)-1);
3114                         if (err) /* File system offline */
3115                                 goto out;
3116                 }
3117
3118                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3119                 while (cache) {
3120                         /*
3121                          * Really this shouldn't happen, but it could if we
3122                          * couldn't write the entire preallocated extent and
3123                          * splitting the extent resulted in a new block.
3124                          */
3125                         if (cache->dirty) {
3126                                 btrfs_put_block_group(cache);
3127                                 goto again;
3128                         }
3129                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3130                                 break;
3131                         cache = next_block_group(root, cache);
3132                 }
3133                 if (!cache) {
3134                         if (last == 0)
3135                                 break;
3136                         last = 0;
3137                         continue;
3138                 }
3139
3140                 err = btrfs_write_out_cache(root, trans, cache, path);
3141
3142                 /*
3143                  * If we didn't have an error then the cache state is still
3144                  * NEED_WRITE, so we can set it to WRITTEN.
3145                  */
3146                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3147                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3148                 last = cache->key.objectid + cache->key.offset;
3149                 btrfs_put_block_group(cache);
3150         }
3151 out:
3152
3153         btrfs_free_path(path);
3154         return err;
3155 }
3156
3157 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3158 {
3159         struct btrfs_block_group_cache *block_group;
3160         int readonly = 0;
3161
3162         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3163         if (!block_group || block_group->ro)
3164                 readonly = 1;
3165         if (block_group)
3166                 btrfs_put_block_group(block_group);
3167         return readonly;
3168 }
3169
3170 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3171                              u64 total_bytes, u64 bytes_used,
3172                              struct btrfs_space_info **space_info)
3173 {
3174         struct btrfs_space_info *found;
3175         int i;
3176         int factor;
3177
3178         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3179                      BTRFS_BLOCK_GROUP_RAID10))
3180                 factor = 2;
3181         else
3182                 factor = 1;
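        /*
         * factor == 2 means every logical byte costs two bytes of raw
         * disk space, e.g. adding a 1GB RAID1 block group grows
         * disk_total by 2GB below.
         */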
3183
3184         found = __find_space_info(info, flags);
3185         if (found) {
3186                 spin_lock(&found->lock);
3187                 found->total_bytes += total_bytes;
3188                 found->disk_total += total_bytes * factor;
3189                 found->bytes_used += bytes_used;
3190                 found->disk_used += bytes_used * factor;
3191                 found->full = 0;
3192                 spin_unlock(&found->lock);
3193                 *space_info = found;
3194                 return 0;
3195         }
3196         found = kzalloc(sizeof(*found), GFP_NOFS);
3197         if (!found)
3198                 return -ENOMEM;
3199
3200         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3201                 INIT_LIST_HEAD(&found->block_groups[i]);
3202         init_rwsem(&found->groups_sem);
3203         spin_lock_init(&found->lock);
3204         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3205         found->total_bytes = total_bytes;
3206         found->disk_total = total_bytes * factor;
3207         found->bytes_used = bytes_used;
3208         found->disk_used = bytes_used * factor;
3209         found->bytes_pinned = 0;
3210         found->bytes_reserved = 0;
3211         found->bytes_readonly = 0;
3212         found->bytes_may_use = 0;
3213         found->full = 0;
3214         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3215         found->chunk_alloc = 0;
3216         found->flush = 0;
3217         init_waitqueue_head(&found->wait);
3218         *space_info = found;
3219         list_add_rcu(&found->list, &info->space_info);
3220         if (flags & BTRFS_BLOCK_GROUP_DATA)
3221                 info->data_sinfo = found;
3222         return 0;
3223 }
3224
3225 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3226 {
3227         u64 extra_flags = chunk_to_extended(flags) &
3228                                 BTRFS_EXTENDED_PROFILE_MASK;
3229
3230         if (flags & BTRFS_BLOCK_GROUP_DATA)
3231                 fs_info->avail_data_alloc_bits |= extra_flags;
3232         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3233                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3234         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3235                 fs_info->avail_system_alloc_bits |= extra_flags;
3236 }
3237
3238 /*
3239  * returns target flags in extended format or 0 if restripe for this
3240  * chunk_type is not in progress
3241  *
3242  * should be called with either volume_mutex or balance_lock held
3243  */
3244 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3245 {
3246         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3247         u64 target = 0;
3248
3249         if (!bctl)
3250                 return 0;
3251
3252         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3253             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3254                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3255         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3256                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3257                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3258         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3259                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3260                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3261         }
3262
3263         return target;
3264 }
3265
3266 /*
3267  * @flags: available profiles in extended format (see ctree.h)
3268  *
3269  * Returns reduced profile in chunk format.  If profile changing is in
3270  * progress (either running or paused) picks the target profile (if it's
3271  * already available), otherwise falls back to plain reducing.
3272  */
3273 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3274 {
3275         /*
3276          * we add in the count of missing devices because we want
3277          * to make sure that any RAID levels on a degraded FS
3278          * continue to be honored.
3279          */
3280         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3281                 root->fs_info->fs_devices->missing_devices;
3282         u64 target;
3283
3284         /*
3285          * see if restripe for this chunk_type is in progress, if so
3286          * try to reduce to the target profile
3287          */
3288         spin_lock(&root->fs_info->balance_lock);
3289         target = get_restripe_target(root->fs_info, flags);
3290         if (target) {
3291                 /* pick target profile only if it's already available */
3292                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3293                         spin_unlock(&root->fs_info->balance_lock);
3294                         return extended_to_chunk(target);
3295                 }
3296         }
3297         spin_unlock(&root->fs_info->balance_lock);
3298
3299         if (num_devices == 1)
3300                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3301         if (num_devices < 4)
3302                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3303
3304         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3305             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3306                       BTRFS_BLOCK_GROUP_RAID10))) {
3307                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3308         }
3309
3310         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3311             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3312                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3313         }
3314
3315         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3316             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3317              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3318              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3319                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3320         }
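        /*
         * E.g. RAID1|RAID10|RAID0 reduces to RAID10: RAID1 loses to
         * RAID10 above, and RAID0 is dropped once any redundant profile
         * remains.
         */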
3321
3322         return extended_to_chunk(flags);
3323 }
3324
3325 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3326 {
3327         if (flags & BTRFS_BLOCK_GROUP_DATA)
3328                 flags |= root->fs_info->avail_data_alloc_bits;
3329         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3330                 flags |= root->fs_info->avail_system_alloc_bits;
3331         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3332                 flags |= root->fs_info->avail_metadata_alloc_bits;
3333
3334         return btrfs_reduce_alloc_profile(root, flags);
3335 }
3336
3337 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3338 {
3339         u64 flags;
3340
3341         if (data)
3342                 flags = BTRFS_BLOCK_GROUP_DATA;
3343         else if (root == root->fs_info->chunk_root)
3344                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3345         else
3346                 flags = BTRFS_BLOCK_GROUP_METADATA;
3347
3348         return get_alloc_profile(root, flags);
3349 }
3350
3351 /*
3352  * This will check the space that the inode allocates from to make sure we have
3353  * enough space for bytes.
3354  */
3355 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3356 {
3357         struct btrfs_space_info *data_sinfo;
3358         struct btrfs_root *root = BTRFS_I(inode)->root;
3359         struct btrfs_fs_info *fs_info = root->fs_info;
3360         u64 used;
3361         int ret = 0, committed = 0, alloc_chunk = 1;
3362
3363         /* make sure bytes are sectorsize aligned */
3364         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
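        /*
         * Round up to the next sectorsize boundary, e.g. 5000 bytes with
         * a 4096 byte sectorsize becomes 8192 (this mask trick relies on
         * sectorsize being a power of two).
         */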
3365
3366         if (root == root->fs_info->tree_root ||
3367             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3368                 alloc_chunk = 0;
3369                 committed = 1;
3370         }
3371
3372         data_sinfo = fs_info->data_sinfo;
3373         if (!data_sinfo)
3374                 goto alloc;
3375
3376 again:
3377         /* make sure we have enough space to handle the data first */
3378         spin_lock(&data_sinfo->lock);
3379         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3380                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3381                 data_sinfo->bytes_may_use;
3382
3383         if (used + bytes > data_sinfo->total_bytes) {
3384                 struct btrfs_trans_handle *trans;
3385
3386                 /*
3387                  * if we don't have enough free bytes in this space then we need
3388                  * to alloc a new chunk.
3389                  */
3390                 if (!data_sinfo->full && alloc_chunk) {
3391                         u64 alloc_target;
3392
3393                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3394                         spin_unlock(&data_sinfo->lock);
3395 alloc:
3396                         alloc_target = btrfs_get_alloc_profile(root, 1);
3397                         trans = btrfs_join_transaction(root);
3398                         if (IS_ERR(trans))
3399                                 return PTR_ERR(trans);
3400
3401                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3402                                              alloc_target,
3403                                              CHUNK_ALLOC_NO_FORCE);
3404                         btrfs_end_transaction(trans, root);
3405                         if (ret < 0) {
3406                                 if (ret != -ENOSPC)
3407                                         return ret;
3408                                 else
3409                                         goto commit_trans;
3410                         }
3411
3412                         if (!data_sinfo)
3413                                 data_sinfo = fs_info->data_sinfo;
3414
3415                         goto again;
3416                 }
3417
3418                 /*
3419                  * If we have less pinned bytes than we want to allocate then
3420                  * don't bother committing the transaction, it won't help us.
3421                  */
3422                 if (data_sinfo->bytes_pinned < bytes)
3423                         committed = 1;
3424                 spin_unlock(&data_sinfo->lock);
3425
3426                 /* commit the current transaction and try again */
3427 commit_trans:
3428                 if (!committed &&
3429                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3430                         committed = 1;
3431                         trans = btrfs_join_transaction(root);
3432                         if (IS_ERR(trans))
3433                                 return PTR_ERR(trans);
3434                         ret = btrfs_commit_transaction(trans, root);
3435                         if (ret)
3436                                 return ret;
3437                         goto again;
3438                 }
3439
3440                 return -ENOSPC;
3441         }
3442         data_sinfo->bytes_may_use += bytes;
3443         trace_btrfs_space_reservation(root->fs_info, "space_info",
3444                                       data_sinfo->flags, bytes, 1);
3445         spin_unlock(&data_sinfo->lock);
3446
3447         return 0;
3448 }
3449
3450 /*
3451  * Called if we need to clear a data reservation for this inode.
3452  */
3453 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3454 {
3455         struct btrfs_root *root = BTRFS_I(inode)->root;
3456         struct btrfs_space_info *data_sinfo;
3457
3458         /* make sure bytes are sectorsize aligned */
3459         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3460
3461         data_sinfo = root->fs_info->data_sinfo;
3462         spin_lock(&data_sinfo->lock);
3463         data_sinfo->bytes_may_use -= bytes;
3464         trace_btrfs_space_reservation(root->fs_info, "space_info",
3465                                       data_sinfo->flags, bytes, 0);
3466         spin_unlock(&data_sinfo->lock);
3467 }
3468
3469 static void force_metadata_allocation(struct btrfs_fs_info *info)
3470 {
3471         struct list_head *head = &info->space_info;
3472         struct btrfs_space_info *found;
3473
3474         rcu_read_lock();
3475         list_for_each_entry_rcu(found, head, list) {
3476                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3477                         found->force_alloc = CHUNK_ALLOC_FORCE;
3478         }
3479         rcu_read_unlock();
3480 }
3481
3482 static int should_alloc_chunk(struct btrfs_root *root,
3483                               struct btrfs_space_info *sinfo, int force)
3484 {
3485         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3486         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3487         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3488         u64 thresh;
3489
3490         if (force == CHUNK_ALLOC_FORCE)
3491                 return 1;
3492
3493         /*
3494          * We need to take into account the global rsv because for all intents
3495          * and purposes it's used space.  Don't worry about locking the
3496          * global_rsv, it doesn't change except when the transaction commits.
3497          */
3498         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3499                 num_allocated += global_rsv->size;
3500
3501         /*
3502          * in limited mode, we want to have some free space up to
3503          * about 1% of the FS size.
3504          */
3505         if (force == CHUNK_ALLOC_LIMITED) {
3506                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3507                 thresh = max_t(u64, 64 * 1024 * 1024,
3508                                div_factor_fine(thresh, 1));
3509
3510                 if (num_bytes - num_allocated < thresh)
3511                         return 1;
3512         }
3513
3514         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3515                 return 0;
3516         return 1;
3517 }
3518
3519 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3520 {
3521         u64 num_dev;
3522
3523         if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3524             type & BTRFS_BLOCK_GROUP_RAID0)
3525                 num_dev = root->fs_info->fs_devices->rw_devices;
3526         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3527                 num_dev = 2;
3528         else
3529                 num_dev = 1;    /* DUP or single */
3530
3531         /* metadata for updating the devices and the chunk tree */
3532         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3533 }
3534
3535 static void check_system_chunk(struct btrfs_trans_handle *trans,
3536                                struct btrfs_root *root, u64 type)
3537 {
3538         struct btrfs_space_info *info;
3539         u64 left;
3540         u64 thresh;
3541
3542         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3543         spin_lock(&info->lock);
3544         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3545                 info->bytes_reserved - info->bytes_readonly;
3546         spin_unlock(&info->lock);
3547
3548         thresh = get_system_chunk_thresh(root, type);
3549         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3550                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3551                        left, thresh, type);
3552                 dump_space_info(info, 0, 0);
3553         }
3554
3555         if (left < thresh) {
3556                 u64 flags;
3557
3558                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3559                 btrfs_alloc_chunk(trans, root, flags);
3560         }
3561 }
3562
3563 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3564                           struct btrfs_root *extent_root, u64 flags, int force)
3565 {
3566         struct btrfs_space_info *space_info;
3567         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3568         int wait_for_alloc = 0;
3569         int ret = 0;
3570
3571         /* Don't re-enter if we're already allocating a chunk */
3572         if (trans->allocating_chunk)
3573                 return -ENOSPC;
3574
3575         space_info = __find_space_info(extent_root->fs_info, flags);
3576         if (!space_info) {
3577                 ret = update_space_info(extent_root->fs_info, flags,
3578                                         0, 0, &space_info);
3579                 BUG_ON(ret); /* -ENOMEM */
3580         }
3581         BUG_ON(!space_info); /* Logic error */
3582
3583 again:
3584         spin_lock(&space_info->lock);
3585         if (force < space_info->force_alloc)
3586                 force = space_info->force_alloc;
3587         if (space_info->full) {
3588                 spin_unlock(&space_info->lock);
3589                 return 0;
3590         }
3591
3592         if (!should_alloc_chunk(extent_root, space_info, force)) {
3593                 spin_unlock(&space_info->lock);
3594                 return 0;
3595         } else if (space_info->chunk_alloc) {
3596                 wait_for_alloc = 1;
3597         } else {
3598                 space_info->chunk_alloc = 1;
3599         }
3600
3601         spin_unlock(&space_info->lock);
3602
3603         mutex_lock(&fs_info->chunk_mutex);
3604
3605         /*
3606          * The chunk_mutex is held throughout the entirety of a chunk
3607          * allocation, so once we've acquired the chunk_mutex we know that the
3608          * other guy is done and we need to recheck and see if we should
3609          * allocate.
3610          */
3611         if (wait_for_alloc) {
3612                 mutex_unlock(&fs_info->chunk_mutex);
3613                 wait_for_alloc = 0;
3614                 goto again;
3615         }
3616
3617         trans->allocating_chunk = true;
3618
3619         /*
3620          * If we have mixed data/metadata chunks we want to make sure we keep
3621          * allocating mixed chunks instead of individual chunks.
3622          */
3623         if (btrfs_mixed_space_info(space_info))
3624                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3625
3626         /*
3627          * if we're doing a data chunk, go ahead and make sure that
3628          * we keep a reasonable number of metadata chunks allocated in the
3629          * FS as well.
3630          */
3631         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3632                 fs_info->data_chunk_allocations++;
3633                 if (!(fs_info->data_chunk_allocations %
3634                       fs_info->metadata_ratio))
3635                         force_metadata_allocation(fs_info);
3636         }
3637
3638         /*
3639          * Check if we have enough space in SYSTEM chunk because we may need
3640          * to update devices.
3641          */
3642         check_system_chunk(trans, extent_root, flags);
3643
3644         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3645         trans->allocating_chunk = false;
3646         if (ret < 0 && ret != -ENOSPC)
3647                 goto out;
3648
3649         spin_lock(&space_info->lock);
3650         if (ret)
3651                 space_info->full = 1;
3652         else
3653                 ret = 1;
3654
3655         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3656         space_info->chunk_alloc = 0;
3657         spin_unlock(&space_info->lock);
3658 out:
3659         mutex_unlock(&fs_info->chunk_mutex);
3660         return ret;
3661 }
3662
3663 static int can_overcommit(struct btrfs_root *root,
3664                           struct btrfs_space_info *space_info, u64 bytes,
3665                           enum btrfs_reserve_flush_enum flush)
3666 {
3667         u64 profile = btrfs_get_alloc_profile(root, 0);
3668         u64 avail;
3669         u64 used;
3670
3671         used = space_info->bytes_used + space_info->bytes_reserved +
3672                 space_info->bytes_pinned + space_info->bytes_readonly +
3673                 space_info->bytes_may_use;
3674
3675         spin_lock(&root->fs_info->free_chunk_lock);
3676         avail = root->fs_info->free_chunk_space;
3677         spin_unlock(&root->fs_info->free_chunk_lock);
3678
3679         /*
3680          * If we have dup, raid1 or raid10 then only half of the free
3681          * space is actually usable.
3682          */
3683         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3684                        BTRFS_BLOCK_GROUP_RAID1 |
3685                        BTRFS_BLOCK_GROUP_RAID10))
3686                 avail >>= 1;
3687
3688         /*
3689          * If we aren't flushing all things, let us overcommit up to
3690          * 1/2 of the space. If we can flush, don't let us overcommit
3691          * too much, let it overcommit up to 1/8 of the space.
3692          */
3693         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3694                 avail >>= 3;
3695         else
3696                 avail >>= 1;
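        /*
         * E.g. with 4GB of unallocated chunk space on RAID1: halved to
         * 2GB for mirroring, then FLUSH_ALL keeps only 1/8th of that
         * (256MB) as overcommit headroom.
         */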
3697
3698         if (used + bytes < space_info->total_bytes + avail)
3699                 return 1;
3700         return 0;
3701 }
3702
3703 static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
3704                                                       unsigned long nr_pages,
3705                                                       enum wb_reason reason)
3706 {
3707         /* the flusher is dealing with the dirty inodes now. */
3708         if (writeback_in_progress(sb->s_bdi))
3709                 return 1;
3710
3711         if (down_read_trylock(&sb->s_umount)) {
3712                 writeback_inodes_sb_nr(sb, nr_pages, reason);
3713                 up_read(&sb->s_umount);
3714                 return 1;
3715         }
3716
3717         return 0;
3718 }
3719
3720 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3721                                   unsigned long nr_pages)
3722 {
3723         struct super_block *sb = root->fs_info->sb;
3724         int started;
3725
3726         /* If we cannot start writeback, just sync all the delalloc files. */
3727         started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
3728                                                       WB_REASON_FS_FREE_SPACE);
3729         if (!started) {
3730                 /*
3731                  * We needn't worry about the filesystem going from r/w to r/o
3732                  * even though we don't acquire the ->s_umount mutex, because
3733                  * the filesystem should guarantee that the delalloc inode list
3734                  * is empty after the filesystem becomes read-only (all dirty
3735                  * pages are written to the disk).
3736                  */
3737                 btrfs_start_delalloc_inodes(root, 0);
3738                 btrfs_wait_ordered_extents(root, 0);
3739         }
3740 }
3741
3742 /*
3743  * shrink metadata reservation for delalloc
3744  */
3745 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3746                             bool wait_ordered)
3747 {
3748         struct btrfs_block_rsv *block_rsv;
3749         struct btrfs_space_info *space_info;
3750         struct btrfs_trans_handle *trans;
3751         u64 delalloc_bytes;
3752         u64 max_reclaim;
3753         long time_left;
3754         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3755         int loops = 0;
3756         enum btrfs_reserve_flush_enum flush;
3757
3758         trans = (struct btrfs_trans_handle *)current->journal_info;
3759         block_rsv = &root->fs_info->delalloc_block_rsv;
3760         space_info = block_rsv->space_info;
3761
3762         smp_mb();
3763         delalloc_bytes = root->fs_info->delalloc_bytes;
3764         if (delalloc_bytes == 0) {
3765                 if (trans)
3766                         return;
3767                 btrfs_wait_ordered_extents(root, 0);
3768                 return;
3769         }
3770
3771         while (delalloc_bytes && loops < 3) {
3772                 max_reclaim = min(delalloc_bytes, to_reclaim);
3773                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3774                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3775                 /*
3776                  * We need to wait for the async pages to actually start before
3777                  * we do anything.
3778                  */
3779                 wait_event(root->fs_info->async_submit_wait,
3780                            !atomic_read(&root->fs_info->async_delalloc_pages));
3781
3782                 if (!trans)
3783                         flush = BTRFS_RESERVE_FLUSH_ALL;
3784                 else
3785                         flush = BTRFS_RESERVE_NO_FLUSH;
3786                 spin_lock(&space_info->lock);
3787                 if (can_overcommit(root, space_info, orig, flush)) {
3788                         spin_unlock(&space_info->lock);
3789                         break;
3790                 }
3791                 spin_unlock(&space_info->lock);
3792
3793                 loops++;
3794                 if (wait_ordered && !trans) {
3795                         btrfs_wait_ordered_extents(root, 0);
3796                 } else {
3797                         time_left = schedule_timeout_killable(1);
3798                         if (time_left)
3799                                 break;
3800                 }
3801                 smp_mb();
3802                 delalloc_bytes = root->fs_info->delalloc_bytes;
3803         }
3804 }
3805
3806 /**
3807  * may_commit_transaction - possibly commit the transaction if it's ok to
3808  * @root - the root we're allocating for
3809  * @bytes - the number of bytes we want to reserve
3810  * @force - force the commit
3811  *
3812  * This will check to make sure that committing the transaction will actually
3813  * get us somewhere and then commit the transaction if it does.  Otherwise it
3814  * will return -ENOSPC.
3815  */
3816 static int may_commit_transaction(struct btrfs_root *root,
3817                                   struct btrfs_space_info *space_info,
3818                                   u64 bytes, int force)
3819 {
3820         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3821         struct btrfs_trans_handle *trans;
3822
3823         trans = (struct btrfs_trans_handle *)current->journal_info;
3824         if (trans)
3825                 return -EAGAIN;
3826
3827         if (force)
3828                 goto commit;
3829
3830         /* See if there is enough pinned space to make this reservation */
3831         spin_lock(&space_info->lock);
3832         if (space_info->bytes_pinned >= bytes) {
3833                 spin_unlock(&space_info->lock);
3834                 goto commit;
3835         }
3836         spin_unlock(&space_info->lock);
3837
3838         /*
3839          * See if there is some space in the delayed insertion reservation for
3840          * this reservation.
3841          */
3842         if (space_info != delayed_rsv->space_info)
3843                 return -ENOSPC;
3844
3845         spin_lock(&space_info->lock);
3846         spin_lock(&delayed_rsv->lock);
3847         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3848                 spin_unlock(&delayed_rsv->lock);
3849                 spin_unlock(&space_info->lock);
3850                 return -ENOSPC;
3851         }
3852         spin_unlock(&delayed_rsv->lock);
3853         spin_unlock(&space_info->lock);
3854
3855 commit:
3856         trans = btrfs_join_transaction(root);
3857         if (IS_ERR(trans))
3858                 return -ENOSPC;
3859
3860         return btrfs_commit_transaction(trans, root);
3861 }
3862
3863 enum flush_state {
3864         FLUSH_DELAYED_ITEMS_NR  =       1,
3865         FLUSH_DELAYED_ITEMS     =       2,
3866         FLUSH_DELALLOC          =       3,
3867         FLUSH_DELALLOC_WAIT     =       4,
3868         ALLOC_CHUNK             =       5,
3869         COMMIT_TRANS            =       6,
3870 };
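/*
 * reserve_metadata_bytes() escalates through these states in order,
 * from the cheapest (running a bounded number of delayed items) to the
 * most expensive (committing the transaction), retrying the reservation
 * after each step.
 */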
3871
3872 static int flush_space(struct btrfs_root *root,
3873                        struct btrfs_space_info *space_info, u64 num_bytes,
3874                        u64 orig_bytes, int state)
3875 {
3876         struct btrfs_trans_handle *trans;
3877         int nr;
3878         int ret = 0;
3879
3880         switch (state) {
3881         case FLUSH_DELAYED_ITEMS_NR:
3882         case FLUSH_DELAYED_ITEMS:
3883                 if (state == FLUSH_DELAYED_ITEMS_NR) {
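                        /*
                         * Flush roughly twice as many delayed items as
                         * would fit in num_bytes worth of single-item
                         * reservations, e.g. num_bytes == 8 * bytes
                         * yields nr == 16.
                         */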
3884                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3885
3886                         nr = (int)div64_u64(num_bytes, bytes);
3887                         if (!nr)
3888                                 nr = 1;
3889                         nr *= 2;
3890                 } else {
3891                         nr = -1;
3892                 }
3893                 trans = btrfs_join_transaction(root);
3894                 if (IS_ERR(trans)) {
3895                         ret = PTR_ERR(trans);
3896                         break;
3897                 }
3898                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
3899                 btrfs_end_transaction(trans, root);
3900                 break;
3901         case FLUSH_DELALLOC:
3902         case FLUSH_DELALLOC_WAIT:
3903                 shrink_delalloc(root, num_bytes, orig_bytes,
3904                                 state == FLUSH_DELALLOC_WAIT);
3905                 break;
3906         case ALLOC_CHUNK:
3907                 trans = btrfs_join_transaction(root);
3908                 if (IS_ERR(trans)) {
3909                         ret = PTR_ERR(trans);
3910                         break;
3911                 }
3912                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3913                                      btrfs_get_alloc_profile(root, 0),
3914                                      CHUNK_ALLOC_NO_FORCE);
3915                 btrfs_end_transaction(trans, root);
3916                 if (ret == -ENOSPC)
3917                         ret = 0;
3918                 break;
3919         case COMMIT_TRANS:
3920                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3921                 break;
3922         default:
3923                 ret = -ENOSPC;
3924                 break;
3925         }
3926
3927         return ret;
3928 }
3929 /**
3930  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3931  * @root - the root we're allocating for
3932  * @block_rsv - the block_rsv we're allocating for
3933  * @orig_bytes - the number of bytes we want
3934  * @flush - whether or not we can flush to make our reservation
3935  *
3936  * This will reserve orig_bytes number of bytes from the space info associated
3937  * with the block_rsv.  If there is not enough space it will make an attempt to
3938  * flush out space to make room.  It will do this by flushing delalloc if
3939  * possible or committing the transaction.  If flush is 0 then no attempts to
3940  * regain reservations will be made and this will fail if there is not enough
3941  * space already.
3942  */
3943 static int reserve_metadata_bytes(struct btrfs_root *root,
3944                                   struct btrfs_block_rsv *block_rsv,
3945                                   u64 orig_bytes,
3946                                   enum btrfs_reserve_flush_enum flush)
3947 {
3948         struct btrfs_space_info *space_info = block_rsv->space_info;
3949         u64 used;
3950         u64 num_bytes = orig_bytes;
3951         int flush_state = FLUSH_DELAYED_ITEMS_NR;
3952         int ret = 0;
3953         bool flushing = false;
3954
3955 again:
3956         ret = 0;
3957         spin_lock(&space_info->lock);
3958         /*
3959          * We only want to wait if somebody other than us is flushing and we
3960          * are actually allowed to flush all things.
3961          */
3962         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
3963                space_info->flush) {
3964                 spin_unlock(&space_info->lock);
3965                 /*
3966                  * If we have a trans handle we can't wait because the flusher
3967                  * may have to commit the transaction, which would mean we would
3968                  * deadlock since we are waiting for the flusher to finish, but
3969                  * hold the current transaction open.
3970                  */
3971                 if (current->journal_info)
3972                         return -EAGAIN;
3973                 ret = wait_event_killable(space_info->wait, !space_info->flush);
3974                 /* Must have been killed, return */
3975                 if (ret)
3976                         return -EINTR;
3977
3978                 spin_lock(&space_info->lock);
3979         }
3980
3981         ret = -ENOSPC;
3982         used = space_info->bytes_used + space_info->bytes_reserved +
3983                 space_info->bytes_pinned + space_info->bytes_readonly +
3984                 space_info->bytes_may_use;
3985
3986         /*
3987          * The idea here is that if we've not already over-reserved the space
3988          * info, we can go ahead and save our reservation first and then start
3989          * flushing if we need to.  Otherwise, if we've already overcommitted,
3990          * let's start flushing stuff first and then come back and try to make
3991          * our reservation.
3992          */
3993         if (used <= space_info->total_bytes) {
3994                 if (used + orig_bytes <= space_info->total_bytes) {
3995                         space_info->bytes_may_use += orig_bytes;
3996                         trace_btrfs_space_reservation(root->fs_info,
3997                                 "space_info", space_info->flags, orig_bytes, 1);
3998                         ret = 0;
3999                 } else {
4000                         /*
4001                          * Ok set num_bytes to orig_bytes since we aren't
4002                          * overcommitted, this way we only try and reclaim what
4003                          * we need.
4004                          */
4005                         num_bytes = orig_bytes;
4006                 }
4007         } else {
4008                 /*
4009                  * Ok we're over committed, set num_bytes to the overcommitted
4010                  * amount plus the amount of bytes that we need for this
4011                  * reservation.
4012                  */
4013                 num_bytes = used - space_info->total_bytes +
4014                         (orig_bytes * 2);
4015         }
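        /*
         * E.g. used == 12GB against total_bytes == 10GB with a 1GB
         * request asks the flushers for the 2GB of overcommit plus 2GB
         * of slack, i.e. num_bytes == 4GB.
         */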
4016
4017         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4018                 space_info->bytes_may_use += orig_bytes;
4019                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4020                                               space_info->flags, orig_bytes,
4021                                               1);
4022                 ret = 0;
4023         }
4024
4025         /*
4026          * Couldn't make our reservation, save our place so while we're trying
4027          * to reclaim space we can actually use it instead of somebody else
4028          * stealing it from us.
4029          *
4030          * We make the other tasks wait for the flush only when we can flush
4031          * all things.
4032          */
4033         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4034                 flushing = true;
4035                 space_info->flush = 1;
4036         }
4037
4038         spin_unlock(&space_info->lock);
4039
4040         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4041                 goto out;
4042
4043         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4044                           flush_state);
4045         flush_state++;
4046
4047         /*
4048          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4049          * would happen, so skip the delalloc flush states.
4050          */
4051         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4052             (flush_state == FLUSH_DELALLOC ||
4053              flush_state == FLUSH_DELALLOC_WAIT))
4054                 flush_state = ALLOC_CHUNK;
4055
4056         if (!ret)
4057                 goto again;
4058         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4059                  flush_state < COMMIT_TRANS)
4060                 goto again;
4061         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4062                  flush_state <= COMMIT_TRANS)
4063                 goto again;
4064
4065 out:
4066         if (flushing) {
4067                 spin_lock(&space_info->lock);
4068                 space_info->flush = 0;
4069                 wake_up_all(&space_info->wait);
4070                 spin_unlock(&space_info->lock);
4071         }
4072         return ret;
4073 }
4074
4075 static struct btrfs_block_rsv *get_block_rsv(
4076                                         const struct btrfs_trans_handle *trans,
4077                                         const struct btrfs_root *root)
4078 {
4079         struct btrfs_block_rsv *block_rsv = NULL;
4080
4081         if (root->ref_cows)
4082                 block_rsv = trans->block_rsv;
4083
4084         if (root == root->fs_info->csum_root && trans->adding_csums)
4085                 block_rsv = trans->block_rsv;
4086
4087         if (!block_rsv)
4088                 block_rsv = root->block_rsv;
4089
4090         if (!block_rsv)
4091                 block_rsv = &root->fs_info->empty_block_rsv;
4092
4093         return block_rsv;
4094 }
4095
4096 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4097                                u64 num_bytes)
4098 {
4099         int ret = -ENOSPC;
4100         spin_lock(&block_rsv->lock);
4101         if (block_rsv->reserved >= num_bytes) {
4102                 block_rsv->reserved -= num_bytes;
4103                 if (block_rsv->reserved < block_rsv->size)
4104                         block_rsv->full = 0;
4105                 ret = 0;
4106         }
4107         spin_unlock(&block_rsv->lock);
4108         return ret;
4109 }
4110
4111 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4112                                 u64 num_bytes, int update_size)
4113 {
4114         spin_lock(&block_rsv->lock);
4115         block_rsv->reserved += num_bytes;
4116         if (update_size)
4117                 block_rsv->size += num_bytes;
4118         else if (block_rsv->reserved >= block_rsv->size)
4119                 block_rsv->full = 1;
4120         spin_unlock(&block_rsv->lock);
4121 }
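
/*
 * Illustrative sketch only; example_add_bytes_modes() is hypothetical and
 * the byte counts are made up.  It contrasts the two update_size modes of
 * block_rsv_add_bytes() above.
 */
static void example_add_bytes_modes(struct btrfs_block_rsv *rsv)
{
        /* grow the reservation target and the reserved bytes together */
        block_rsv_add_bytes(rsv, 4096, 1);  /* size += 4096, reserved += 4096 */

        /* refill toward the existing target; may mark the rsv full */
        block_rsv_add_bytes(rsv, 4096, 0);  /* reserved += 4096, size unchanged */
}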
4122
4123 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4124                                     struct btrfs_block_rsv *block_rsv,
4125                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4126 {
4127         struct btrfs_space_info *space_info = block_rsv->space_info;
4128
4129         spin_lock(&block_rsv->lock);
4130         if (num_bytes == (u64)-1)
4131                 num_bytes = block_rsv->size;
4132         block_rsv->size -= num_bytes;
4133         if (block_rsv->reserved >= block_rsv->size) {
4134                 num_bytes = block_rsv->reserved - block_rsv->size;
4135                 block_rsv->reserved = block_rsv->size;
4136                 block_rsv->full = 1;
4137         } else {
4138                 num_bytes = 0;
4139         }
4140         spin_unlock(&block_rsv->lock);
4141
4142         if (num_bytes > 0) {
4143                 if (dest) {
4144                         spin_lock(&dest->lock);
4145                         if (!dest->full) {
4146                                 u64 bytes_to_add;
4147
4148                                 bytes_to_add = dest->size - dest->reserved;
4149                                 bytes_to_add = min(num_bytes, bytes_to_add);
4150                                 dest->reserved += bytes_to_add;
4151                                 if (dest->reserved >= dest->size)
4152                                         dest->full = 1;
4153                                 num_bytes -= bytes_to_add;
4154                         }
4155                         spin_unlock(&dest->lock);
4156                 }
4157                 if (num_bytes) {
4158                         spin_lock(&space_info->lock);
4159                         space_info->bytes_may_use -= num_bytes;
4160                         trace_btrfs_space_reservation(fs_info, "space_info",
4161                                         space_info->flags, num_bytes, 0);
4162                         space_info->reservation_progress++;
4163                         spin_unlock(&space_info->lock);
4164                 }
4165         }
4166 }
4167
4168 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4169                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4170 {
4171         int ret;
4172
4173         ret = block_rsv_use_bytes(src, num_bytes);
4174         if (ret)
4175                 return ret;
4176
4177         block_rsv_add_bytes(dst, num_bytes, 1);
4178         return 0;
4179 }
4180
4181 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4182 {
4183         memset(rsv, 0, sizeof(*rsv));
4184         spin_lock_init(&rsv->lock);
4185         rsv->type = type;
4186 }
4187
4188 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4189                                               unsigned short type)
4190 {
4191         struct btrfs_block_rsv *block_rsv;
4192         struct btrfs_fs_info *fs_info = root->fs_info;
4193
4194         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4195         if (!block_rsv)
4196                 return NULL;
4197
4198         btrfs_init_block_rsv(block_rsv, type);
4199         block_rsv->space_info = __find_space_info(fs_info,
4200                                                   BTRFS_BLOCK_GROUP_METADATA);
4201         return block_rsv;
4202 }
4203
4204 void btrfs_free_block_rsv(struct btrfs_root *root,
4205                           struct btrfs_block_rsv *rsv)
4206 {
4207         if (!rsv)
4208                 return;
4209         btrfs_block_rsv_release(root, rsv, (u64)-1);
4210         kfree(rsv);
4211 }
4212
4213 int btrfs_block_rsv_add(struct btrfs_root *root,
4214                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4215                         enum btrfs_reserve_flush_enum flush)
4216 {
4217         int ret;
4218
4219         if (num_bytes == 0)
4220                 return 0;
4221
4222         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4223         if (!ret) {
4224                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4225                 return 0;
4226         }
4227
4228         return ret;
4229 }
4230
4231 int btrfs_block_rsv_check(struct btrfs_root *root,
4232                           struct btrfs_block_rsv *block_rsv, int min_factor)
4233 {
4234         u64 num_bytes = 0;
4235         int ret = -ENOSPC;
4236
4237         if (!block_rsv)
4238                 return 0;
4239
4240         spin_lock(&block_rsv->lock);
4241         num_bytes = div_factor(block_rsv->size, min_factor);
4242         if (block_rsv->reserved >= num_bytes)
4243                 ret = 0;
4244         spin_unlock(&block_rsv->lock);
4245
4246         return ret;
4247 }
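
/*
 * Illustrative sketch only; example_global_rsv_half_full() is hypothetical.
 * min_factor is in tenths, so passing 5 asks whether the reservation is at
 * least half full: div_factor(size, 5) computes size * 5 / 10.
 */
static int example_global_rsv_half_full(struct btrfs_root *root)
{
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

        /* 0 when reserved >= 50% of size, -ENOSPC otherwise */
        return btrfs_block_rsv_check(root, global_rsv, 5);
}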
4248
4249 int btrfs_block_rsv_refill(struct btrfs_root *root,
4250                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4251                            enum btrfs_reserve_flush_enum flush)
4252 {
4253         u64 num_bytes = 0;
4254         int ret = -ENOSPC;
4255
4256         if (!block_rsv)
4257                 return 0;
4258
4259         spin_lock(&block_rsv->lock);
4260         num_bytes = min_reserved;
4261         if (block_rsv->reserved >= num_bytes)
4262                 ret = 0;
4263         else
4264                 num_bytes -= block_rsv->reserved;
4265         spin_unlock(&block_rsv->lock);
4266
4267         if (!ret)
4268                 return 0;
4269
4270         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4271         if (!ret) {
4272                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4273                 return 0;
4274         }
4275
4276         return ret;
4277 }
4278
4279 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4280                             struct btrfs_block_rsv *dst_rsv,
4281                             u64 num_bytes)
4282 {
4283         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4284 }
4285
4286 void btrfs_block_rsv_release(struct btrfs_root *root,
4287                              struct btrfs_block_rsv *block_rsv,
4288                              u64 num_bytes)
4289 {
4290         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4291         if (global_rsv->full || global_rsv == block_rsv ||
4292             block_rsv->space_info != global_rsv->space_info)
4293                 global_rsv = NULL;
4294         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4295                                 num_bytes);
4296 }
4297
4298 /*
4299  * Helper to calculate the size of the global block reservation.
4300  * The desired value is the sum of the space used by the extent tree,
4301  * the checksum tree and the root tree.
4302  */
4303 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4304 {
4305         struct btrfs_space_info *sinfo;
4306         u64 num_bytes;
4307         u64 meta_used;
4308         u64 data_used;
4309         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4310
4311         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4312         spin_lock(&sinfo->lock);
4313         data_used = sinfo->bytes_used;
4314         spin_unlock(&sinfo->lock);
4315
4316         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4317         spin_lock(&sinfo->lock);
4318         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4319                 data_used = 0;
4320         meta_used = sinfo->bytes_used;
4321         spin_unlock(&sinfo->lock);
4322
4323         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4324                     csum_size * 2;
4325         num_bytes += div64_u64(data_used + meta_used, 50);
4326
4327         if (num_bytes * 3 > meta_used)
4328                 num_bytes = div64_u64(meta_used, 3);
4329
4330         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4331 }
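
/*
 * A worked example of the sizing above, with made-up numbers: 1 GiB of data,
 * 256 MiB of metadata, 4 KiB blocks, 4-byte crc32c checksums and 4 KiB
 * leaves.  example_global_metadata_size() is illustrative, not btrfs code.
 */
static u64 example_global_metadata_size(void)
{
        u64 data_used = 1024ULL * 1024 * 1024;  /* 1 GiB */
        u64 meta_used = 256ULL * 1024 * 1024;   /* 256 MiB */
        u64 num_bytes;

        /* two copies of every checksum: 262144 blocks * 4 bytes * 2 = 2 MiB */
        num_bytes = (data_used >> 12) * 4 * 2;
        /* plus 2% of data + metadata, ~25.6 MiB, for ~27.6 MiB in total */
        num_bytes += div64_u64(data_used + meta_used, 50);
        /* 3 * ~27.6 MiB < 256 MiB, so the meta_used / 3 cap does not apply */
        return ALIGN(num_bytes, 4096 << 10);    /* 4 MiB aligned -> 28 MiB */
}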
4332
4333 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4334 {
4335         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4336         struct btrfs_space_info *sinfo = block_rsv->space_info;
4337         u64 num_bytes;
4338
4339         num_bytes = calc_global_metadata_size(fs_info);
4340
4341         spin_lock(&sinfo->lock);
4342         spin_lock(&block_rsv->lock);
4343
4344         block_rsv->size = num_bytes;
4345
4346         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4347                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4348                     sinfo->bytes_may_use;
4349
4350         if (sinfo->total_bytes > num_bytes) {
4351                 num_bytes = sinfo->total_bytes - num_bytes;
4352                 block_rsv->reserved += num_bytes;
4353                 sinfo->bytes_may_use += num_bytes;
4354                 trace_btrfs_space_reservation(fs_info, "space_info",
4355                                       sinfo->flags, num_bytes, 1);
4356         }
4357
4358         if (block_rsv->reserved >= block_rsv->size) {
4359                 num_bytes = block_rsv->reserved - block_rsv->size;
4360                 sinfo->bytes_may_use -= num_bytes;
4361                 trace_btrfs_space_reservation(fs_info, "space_info",
4362                                       sinfo->flags, num_bytes, 0);
4363                 sinfo->reservation_progress++;
4364                 block_rsv->reserved = block_rsv->size;
4365                 block_rsv->full = 1;
4366         }
4367
4368         spin_unlock(&block_rsv->lock);
4369         spin_unlock(&sinfo->lock);
4370 }
4371
4372 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4373 {
4374         struct btrfs_space_info *space_info;
4375
4376         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4377         fs_info->chunk_block_rsv.space_info = space_info;
4378
4379         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4380         fs_info->global_block_rsv.space_info = space_info;
4381         fs_info->delalloc_block_rsv.space_info = space_info;
4382         fs_info->trans_block_rsv.space_info = space_info;
4383         fs_info->empty_block_rsv.space_info = space_info;
4384         fs_info->delayed_block_rsv.space_info = space_info;
4385
4386         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4387         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4388         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4389         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4390         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4391
4392         update_global_block_rsv(fs_info);
4393 }
4394
4395 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4396 {
4397         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4398                                 (u64)-1);
4399         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4400         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4401         WARN_ON(fs_info->trans_block_rsv.size > 0);
4402         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4403         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4404         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4405         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4406         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4407 }
4408
4409 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4410                                   struct btrfs_root *root)
4411 {
4412         if (!trans->block_rsv)
4413                 return;
4414
4415         if (!trans->bytes_reserved)
4416                 return;
4417
4418         trace_btrfs_space_reservation(root->fs_info, "transaction",
4419                                       trans->transid, trans->bytes_reserved, 0);
4420         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4421         trans->bytes_reserved = 0;
4422 }
4423
4424 /* Can only return 0 or -ENOSPC */
4425 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4426                                   struct inode *inode)
4427 {
4428         struct btrfs_root *root = BTRFS_I(inode)->root;
4429         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4430         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4431
4432         /*
4433          * We need to hold space in order to delete our orphan item once we've
4434          * added it, so take the reservation here and release it later, when
4435          * we are truly done with the orphan item.
4436          */
4437         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4438         trace_btrfs_space_reservation(root->fs_info, "orphan",
4439                                       btrfs_ino(inode), num_bytes, 1);
4440         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4441 }
4442
4443 void btrfs_orphan_release_metadata(struct inode *inode)
4444 {
4445         struct btrfs_root *root = BTRFS_I(inode)->root;
4446         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4447         trace_btrfs_space_reservation(root->fs_info, "orphan",
4448                                       btrfs_ino(inode), num_bytes, 0);
4449         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4450 }
4451
4452 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4453                                 struct btrfs_pending_snapshot *pending)
4454 {
4455         struct btrfs_root *root = pending->root;
4456         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4457         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4458         /*
4459          * two for root back/forward refs, two for directory entries,
4460          * one for the root of the snapshot and one for the parent inode.
4461          */
4462         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
4463         dst_rsv->space_info = src_rsv->space_info;
4464         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4465 }
4466
4467 /**
4468  * drop_outstanding_extent - drop an outstanding extent
4469  * @inode: the inode we're dropping the extent for
4470  *
4471  * This is called when we are freeing up an outstanding extent, either after
4472  * an error or after an extent is written.  This will return the number of
4473  * reserved extents that need to be freed.  This must be called with
4474  * BTRFS_I(inode)->lock held.
4475  */
4476 static unsigned drop_outstanding_extent(struct inode *inode)
4477 {
4478         unsigned drop_inode_space = 0;
4479         unsigned dropped_extents = 0;
4480
4481         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4482         BTRFS_I(inode)->outstanding_extents--;
4483
4484         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4485             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4486                                &BTRFS_I(inode)->runtime_flags))
4487                 drop_inode_space = 1;
4488
4489         /*
4490          * If we have at least as many outstanding extents as we have
4491          * reserved, then we need to leave the reserved extents count alone.
4492          */
4493         if (BTRFS_I(inode)->outstanding_extents >=
4494             BTRFS_I(inode)->reserved_extents)
4495                 return drop_inode_space;
4496
4497         dropped_extents = BTRFS_I(inode)->reserved_extents -
4498                 BTRFS_I(inode)->outstanding_extents;
4499         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4500         return dropped_extents + drop_inode_space;
4501 }
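
/*
 * Illustrative wrapper only; example_drop_one_extent() is hypothetical.
 * With, say, 3 outstanding and 4 reserved extents, finishing one extent
 * leaves 2 outstanding, so the call below returns 4 - 2 = 2 reservations
 * to free and reserved_extents drops to 2.
 */
static unsigned example_drop_one_extent(struct inode *inode)
{
        unsigned dropped;

        /* drop_outstanding_extent() must run under the inode lock */
        spin_lock(&BTRFS_I(inode)->lock);
        dropped = drop_outstanding_extent(inode);
        spin_unlock(&BTRFS_I(inode)->lock);

        return dropped;
}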
4502
4503 /**
4504  * calc_csum_metadata_size - return the amount of metadata space that must be
4505  *      reserved/freed for the given bytes.
4506  * @inode: the inode we're manipulating
4507  * @num_bytes: the number of bytes in question
4508  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4509  *
4510  * This adjusts the number of csum_bytes in the inode and then returns the
4511  * correct amount of metadata that must either be reserved or freed.  We
4512  * calculate how many checksums we can fit into one leaf and then divide the
4513  * number of bytes that will need to be checksummed by this value to figure out
4514  * how many checksums will be required.  If we are adding bytes then the number
4515  * may go up and we will return the number of additional bytes that must be
4516  * reserved.  If it is going down we will return the number of bytes that must
4517  * be freed.
4518  *
4519  * This must be called with BTRFS_I(inode)->lock held.
4520  */
4521 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4522                                    int reserve)
4523 {
4524         struct btrfs_root *root = BTRFS_I(inode)->root;
4525         u64 csum_size;
4526         int num_csums_per_leaf;
4527         int num_csums;
4528         int old_csums;
4529
4530         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4531             BTRFS_I(inode)->csum_bytes == 0)
4532                 return 0;
4533
4534         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4535         if (reserve)
4536                 BTRFS_I(inode)->csum_bytes += num_bytes;
4537         else
4538                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4539         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4540         num_csums_per_leaf = (int)div64_u64(csum_size,
4541                                             sizeof(struct btrfs_csum_item) +
4542                                             sizeof(struct btrfs_disk_key));
4543         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4544         num_csums = num_csums + num_csums_per_leaf - 1;
4545         num_csums = num_csums / num_csums_per_leaf;
4546
4547         old_csums = old_csums + num_csums_per_leaf - 1;
4548         old_csums = old_csums / num_csums_per_leaf;
4549
4550         /* No change, no need to reserve more */
4551         if (old_csums == num_csums)
4552                 return 0;
4553
4554         if (reserve)
4555                 return btrfs_calc_trans_metadata_size(root,
4556                                                       num_csums - old_csums);
4557
4558         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4559 }
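
/*
 * A worked example of the checksum math above, with made-up numbers: 4 KiB
 * sectors and roughly 220 checksum slots per 4 KiB leaf.  Growing csum_bytes
 * from 0 to 1 MiB means 256 checksums, i.e. 2 leaves, so the function above
 * would return btrfs_calc_trans_metadata_size(root, 2).  The helper below is
 * illustrative, not btrfs code.
 */
static int example_csum_leaves(u64 csum_bytes)
{
        int per_leaf = 220;     /* assumed slots per 4 KiB leaf */
        int csums = (int)div64_u64(csum_bytes, 4096);   /* one per sector */

        return (csums + per_leaf - 1) / per_leaf;       /* round up to leaves */
}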
4560
4561 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4562 {
4563         struct btrfs_root *root = BTRFS_I(inode)->root;
4564         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4565         u64 to_reserve = 0;
4566         u64 csum_bytes;
4567         unsigned nr_extents = 0;
4568         int extra_reserve = 0;
4569         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4570         int ret = 0;
4571         bool delalloc_lock = true;
4572
4573         /* If we are a free space inode we need to not flush since we will be in
4574          * the middle of a transaction commit.  We also don't need the delalloc
4575          * mutex since we won't race with anybody.  We need this mostly to make
4576          * lockdep shut its filthy mouth.
4577          */
4578         if (btrfs_is_free_space_inode(inode)) {
4579                 flush = BTRFS_RESERVE_NO_FLUSH;
4580                 delalloc_lock = false;
4581         }
4582
4583         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4584             btrfs_transaction_in_commit(root->fs_info))
4585                 schedule_timeout(1);
4586
4587         if (delalloc_lock)
4588                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4589
4590         num_bytes = ALIGN(num_bytes, root->sectorsize);
4591
4592         spin_lock(&BTRFS_I(inode)->lock);
4593         BTRFS_I(inode)->outstanding_extents++;
4594
4595         if (BTRFS_I(inode)->outstanding_extents >
4596             BTRFS_I(inode)->reserved_extents)
4597                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4598                         BTRFS_I(inode)->reserved_extents;
4599
4600         /*
4601          * Add an item to reserve for updating the inode when we complete the
4602          * delalloc io.
4603          */
4604         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4605                       &BTRFS_I(inode)->runtime_flags)) {
4606                 nr_extents++;
4607                 extra_reserve = 1;
4608         }
4609
4610         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4611         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4612         csum_bytes = BTRFS_I(inode)->csum_bytes;
4613         spin_unlock(&BTRFS_I(inode)->lock);
4614
4615         if (root->fs_info->quota_enabled)
4616                 ret = btrfs_qgroup_reserve(root, num_bytes +
4617                                            nr_extents * root->leafsize);
4618
4619         /*
4620          * ret != 0 here means the qgroup reservation failed; in that case we
4621          * go straight to the shared error handling.
4622          */
4623         if (ret == 0)
4624                 ret = reserve_metadata_bytes(root, block_rsv,
4625                                              to_reserve, flush);
4626
4627         if (ret) {
4628                 u64 to_free = 0;
4629                 unsigned dropped;
4630
4631                 spin_lock(&BTRFS_I(inode)->lock);
4632                 dropped = drop_outstanding_extent(inode);
4633                 /*
4634                  * If the inode's csum_bytes is the same as the original
4635                  * csum_bytes then we know we haven't raced with any free()ers,
4636                  * so we can just reduce our inode's csum bytes and carry on.
4637                  * Otherwise we have to do the normal free thing to account for
4638                  * the case that the free side didn't free up its reserve
4639                  * because of this outstanding reservation.
4640                  */
4641                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4642                         calc_csum_metadata_size(inode, num_bytes, 0);
4643                 else
4644                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4645                 spin_unlock(&BTRFS_I(inode)->lock);
4646                 if (dropped)
4647                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4648
4649                 if (to_free) {
4650                         btrfs_block_rsv_release(root, block_rsv, to_free);
4651                         trace_btrfs_space_reservation(root->fs_info,
4652                                                       "delalloc",
4653                                                       btrfs_ino(inode),
4654                                                       to_free, 0);
4655                 }
4656                 if (root->fs_info->quota_enabled) {
4657                         btrfs_qgroup_free(root, num_bytes +
4658                                                 nr_extents * root->leafsize);
4659                 }
4660                 if (delalloc_lock)
4661                         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4662                 return ret;
4663         }
4664
4665         spin_lock(&BTRFS_I(inode)->lock);
4666         if (extra_reserve) {
4667                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4668                         &BTRFS_I(inode)->runtime_flags);
4669                 nr_extents--;
4670         }
4671         BTRFS_I(inode)->reserved_extents += nr_extents;
4672         spin_unlock(&BTRFS_I(inode)->lock);
4673
4674         if (delalloc_lock)
4675                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4676
4677         if (to_reserve)
4678                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4679                                               btrfs_ino(inode), to_reserve, 1);
4680         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4681
4682         return 0;
4683 }
4684
4685 /**
4686  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4687  * @inode: the inode to release the reservation for
4688  * @num_bytes: the number of bytes we're releasing
4689  *
4690  * This will release the metadata reservation for an inode.  This can be called
4691  * once we complete IO for a given set of bytes to release their metadata
4692  * reservations.
4693  */
4694 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4695 {
4696         struct btrfs_root *root = BTRFS_I(inode)->root;
4697         u64 to_free = 0;
4698         unsigned dropped;
4699
4700         num_bytes = ALIGN(num_bytes, root->sectorsize);
4701         spin_lock(&BTRFS_I(inode)->lock);
4702         dropped = drop_outstanding_extent(inode);
4703
4704         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4705         spin_unlock(&BTRFS_I(inode)->lock);
4706         if (dropped > 0)
4707                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4708
4709         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4710                                       btrfs_ino(inode), to_free, 0);
4711         if (root->fs_info->quota_enabled) {
4712                 btrfs_qgroup_free(root, num_bytes +
4713                                         dropped * root->leafsize);
4714         }
4715
4716         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4717                                 to_free);
4718 }
4719
4720 /**
4721  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4722  * @inode: inode we're writing to
4723  * @num_bytes: the number of bytes we want to allocate
4724  *
4725  * This will do the following things
4726  *
4727  * o reserve space in the data space info for num_bytes
4728  * o reserve space in the metadata space info based on number of outstanding
4729  *   extents and how much csums will be needed
4730  * o add to the inode's ->delalloc_bytes
4731  * o add it to the fs_info's delalloc inodes list.
4732  *
4733  * This will return 0 for success and -ENOSPC if there is no space left.
4734  */
4735 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4736 {
4737         int ret;
4738
4739         ret = btrfs_check_data_free_space(inode, num_bytes);
4740         if (ret)
4741                 return ret;
4742
4743         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4744         if (ret) {
4745                 btrfs_free_reserved_data_space(inode, num_bytes);
4746                 return ret;
4747         }
4748
4749         return 0;
4750 }
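
/*
 * Illustrative write-path sketch; both helpers below are hypothetical,
 * example_dirty_pages() standing in for the real work of copying data into
 * the page cache.
 */
static int example_dirty_pages(struct inode *inode, u64 num_bytes)
{
        return 0;
}

static int example_buffered_write_space(struct inode *inode, u64 num_bytes)
{
        int ret;

        /* one call reserves both the data and the metadata space */
        ret = btrfs_delalloc_reserve_space(inode, num_bytes);
        if (ret)
                return ret;

        ret = example_dirty_pages(inode, num_bytes);
        if (ret) {
                /* failed before any IO: give both reservations back */
                btrfs_delalloc_release_space(inode, num_bytes);
                return ret;
        }

        /*
         * On success the metadata half is released later, by
         * btrfs_delalloc_release_metadata() as the delalloc IO completes.
         */
        return 0;
}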
4751
4752 /**
4753  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4754  * @inode: inode we're releasing space for
4755  * @num_bytes: the number of bytes we want to free up
4756  *
4757  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4758  * called in the case that we don't need the metadata AND data reservations
4759  * anymore, for example if there is an error or we insert an inline extent.
4760  *
4761  * This function will release the metadata space that was not used and will
4762  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4763  * list if there are no delalloc bytes left.
4764  */
4765 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4766 {
4767         btrfs_delalloc_release_metadata(inode, num_bytes);
4768         btrfs_free_reserved_data_space(inode, num_bytes);
4769 }
4770
4771 static int update_block_group(struct btrfs_root *root,
4772                               u64 bytenr, u64 num_bytes, int alloc)
4773 {
4774         struct btrfs_block_group_cache *cache = NULL;
4775         struct btrfs_fs_info *info = root->fs_info;
4776         u64 total = num_bytes;
4777         u64 old_val;
4778         u64 byte_in_group;
4779         int factor;
4780
4781         /* block accounting for super block */
4782         spin_lock(&info->delalloc_lock);
4783         old_val = btrfs_super_bytes_used(info->super_copy);
4784         if (alloc)
4785                 old_val += num_bytes;
4786         else
4787                 old_val -= num_bytes;
4788         btrfs_set_super_bytes_used(info->super_copy, old_val);
4789         spin_unlock(&info->delalloc_lock);
4790
4791         while (total) {
4792                 cache = btrfs_lookup_block_group(info, bytenr);
4793                 if (!cache)
4794                         return -ENOENT;
4795                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4796                                     BTRFS_BLOCK_GROUP_RAID1 |
4797                                     BTRFS_BLOCK_GROUP_RAID10))
4798                         factor = 2;
4799                 else
4800                         factor = 1;
4801                 /*
4802                  * If this block group has free space cache written out, we
4803                  * need to make sure to load it if we are removing space.  This
4804                  * is because we need the unpinning stage to actually add the
4805                  * space back to the block group, otherwise we will leak space.
4806                  */
4807                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4808                         cache_block_group(cache, 1);
4809
4810                 byte_in_group = bytenr - cache->key.objectid;
4811                 WARN_ON(byte_in_group > cache->key.offset);
4812
4813                 spin_lock(&cache->space_info->lock);
4814                 spin_lock(&cache->lock);
4815
4816                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4817                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4818                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4819
4820                 cache->dirty = 1;
4821                 old_val = btrfs_block_group_used(&cache->item);
4822                 num_bytes = min(total, cache->key.offset - byte_in_group);
4823                 if (alloc) {
4824                         old_val += num_bytes;
4825                         btrfs_set_block_group_used(&cache->item, old_val);
4826                         cache->reserved -= num_bytes;
4827                         cache->space_info->bytes_reserved -= num_bytes;
4828                         cache->space_info->bytes_used += num_bytes;
4829                         cache->space_info->disk_used += num_bytes * factor;
4830                         spin_unlock(&cache->lock);
4831                         spin_unlock(&cache->space_info->lock);
4832                 } else {
4833                         old_val -= num_bytes;
4834                         btrfs_set_block_group_used(&cache->item, old_val);
4835                         cache->pinned += num_bytes;
4836                         cache->space_info->bytes_pinned += num_bytes;
4837                         cache->space_info->bytes_used -= num_bytes;
4838                         cache->space_info->disk_used -= num_bytes * factor;
4839                         spin_unlock(&cache->lock);
4840                         spin_unlock(&cache->space_info->lock);
4841
4842                         set_extent_dirty(info->pinned_extents,
4843                                          bytenr, bytenr + num_bytes - 1,
4844                                          GFP_NOFS | __GFP_NOFAIL);
4845                 }
4846                 btrfs_put_block_group(cache);
4847                 total -= num_bytes;
4848                 bytenr += num_bytes;
4849         }
4850         return 0;
4851 }
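
/*
 * Illustrative only; example_alloc_accounting() is hypothetical.  Allocating
 * 1 MiB out of a RAID1 (or DUP/RAID10) block group moves bytes_used by 1 MiB
 * but disk_used by 2 MiB, because factor is 2 for the mirrored profiles
 * handled above.
 */
static int example_alloc_accounting(struct btrfs_root *root, u64 bytenr)
{
        return update_block_group(root, bytenr, 1024 * 1024, 1);
}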
4852
4853 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4854 {
4855         struct btrfs_block_group_cache *cache;
4856         u64 bytenr;
4857
4858         spin_lock(&root->fs_info->block_group_cache_lock);
4859         bytenr = root->fs_info->first_logical_byte;
4860         spin_unlock(&root->fs_info->block_group_cache_lock);
4861
4862         if (bytenr < (u64)-1)
4863                 return bytenr;
4864
4865         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4866         if (!cache)
4867                 return 0;
4868
4869         bytenr = cache->key.objectid;
4870         btrfs_put_block_group(cache);
4871
4872         return bytenr;
4873 }
4874
4875 static int pin_down_extent(struct btrfs_root *root,
4876                            struct btrfs_block_group_cache *cache,
4877                            u64 bytenr, u64 num_bytes, int reserved)
4878 {
4879         spin_lock(&cache->space_info->lock);
4880         spin_lock(&cache->lock);
4881         cache->pinned += num_bytes;
4882         cache->space_info->bytes_pinned += num_bytes;
4883         if (reserved) {
4884                 cache->reserved -= num_bytes;
4885                 cache->space_info->bytes_reserved -= num_bytes;
4886         }
4887         spin_unlock(&cache->lock);
4888         spin_unlock(&cache->space_info->lock);
4889
4890         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4891                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4892         return 0;
4893 }
4894
4895 /*
4896  * this function must be called within a transaction
4897  */
4898 int btrfs_pin_extent(struct btrfs_root *root,
4899                      u64 bytenr, u64 num_bytes, int reserved)
4900 {
4901         struct btrfs_block_group_cache *cache;
4902
4903         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4904         BUG_ON(!cache); /* Logic error */
4905
4906         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4907
4908         btrfs_put_block_group(cache);
4909         return 0;
4910 }
4911
4912 /*
4913  * this function must be called within a transaction
4914  */
4915 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
4916                                     u64 bytenr, u64 num_bytes)
4917 {
4918         struct btrfs_block_group_cache *cache;
4919
4920         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4921         BUG_ON(!cache); /* Logic error */
4922
4923         /*
4924          * pull in the free space cache (if any) so that our pin
4925          * removes the free space from the cache.  We have load_only set
4926          * to one because the slow code to read in the free extents does check
4927          * the pinned extents.
4928          */
4929         cache_block_group(cache, 1);
4930
4931         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4932
4933         /* remove us from the free space cache (if we're there at all) */
4934         btrfs_remove_free_space(cache, bytenr, num_bytes);
4935         btrfs_put_block_group(cache);
4936         return 0;
4937 }
4938
4939 /**
4940  * btrfs_update_reserved_bytes - update the block_group and space info counters
4941  * @cache:      The cache we are manipulating
4942  * @num_bytes:  The number of bytes in question
4943  * @reserve:    One of the reservation enums
4944  *
4945  * This is called by the allocator when it reserves space, or by somebody who is
4946  * freeing space that was never actually used on disk.  For example if you
4947  * reserve some space for a new leaf in transaction A and before transaction A
4948  * commits you free that leaf, you call this with reserve set to 0 in order to
4949  * clear the reservation.
4950  *
4951  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4952  * ENOSPC accounting.  For data we handle the reservation through clearing the
4953  * delalloc bits in the io_tree.  We have to do this since we could end up
4954  * allocating less disk space for the amount of data we have reserved in the
4955  * case of compression.
4956  *
4957  * If this is a reservation and the block group has become read-only, we
4958  * cannot make the reservation and return -EAGAIN; otherwise this function
4959  * always succeeds.
4960  */
4961 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4962                                        u64 num_bytes, int reserve)
4963 {
4964         struct btrfs_space_info *space_info = cache->space_info;
4965         int ret = 0;
4966
4967         spin_lock(&space_info->lock);
4968         spin_lock(&cache->lock);
4969         if (reserve != RESERVE_FREE) {
4970                 if (cache->ro) {
4971                         ret = -EAGAIN;
4972                 } else {
4973                         cache->reserved += num_bytes;
4974                         space_info->bytes_reserved += num_bytes;
4975                         if (reserve == RESERVE_ALLOC) {
4976                                 trace_btrfs_space_reservation(cache->fs_info,
4977                                                 "space_info", space_info->flags,
4978                                                 num_bytes, 0);
4979                                 space_info->bytes_may_use -= num_bytes;
4980                         }
4981                 }
4982         } else {
4983                 if (cache->ro)
4984                         space_info->bytes_readonly += num_bytes;
4985                 cache->reserved -= num_bytes;
4986                 space_info->bytes_reserved -= num_bytes;
4987                 space_info->reservation_progress++;
4988         }
4989         spin_unlock(&cache->lock);
4990         spin_unlock(&space_info->lock);
4991         return ret;
4992 }
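
/*
 * An illustrative reservation round-trip; example_reserve_cycle() and the
 * 16 KiB extent size are hypothetical.
 */
static void example_reserve_cycle(struct btrfs_block_group_cache *cache)
{
        /* the allocator reserves space for a new metadata extent */
        if (btrfs_update_reserved_bytes(cache, 16384, RESERVE_ALLOC))
                return; /* -EAGAIN: the block group went read-only */

        /* the extent is freed again before it ever reaches disk */
        btrfs_update_reserved_bytes(cache, 16384, RESERVE_FREE);
}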
4993
4994 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4995                                 struct btrfs_root *root)
4996 {
4997         struct btrfs_fs_info *fs_info = root->fs_info;
4998         struct btrfs_caching_control *next;
4999         struct btrfs_caching_control *caching_ctl;
5000         struct btrfs_block_group_cache *cache;
5001
5002         down_write(&fs_info->extent_commit_sem);
5003
5004         list_for_each_entry_safe(caching_ctl, next,
5005                                  &fs_info->caching_block_groups, list) {
5006                 cache = caching_ctl->block_group;
5007                 if (block_group_cache_done(cache)) {
5008                         cache->last_byte_to_unpin = (u64)-1;
5009                         list_del_init(&caching_ctl->list);
5010                         put_caching_control(caching_ctl);
5011                 } else {
5012                         cache->last_byte_to_unpin = caching_ctl->progress;
5013                 }
5014         }
5015
5016         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5017                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5018         else
5019                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5020
5021         up_write(&fs_info->extent_commit_sem);
5022
5023         update_global_block_rsv(fs_info);
5024 }
5025
5026 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5027 {
5028         struct btrfs_fs_info *fs_info = root->fs_info;
5029         struct btrfs_block_group_cache *cache = NULL;
5030         struct btrfs_space_info *space_info;
5031         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5032         u64 len;
5033         bool readonly;
5034
5035         while (start <= end) {
5036                 readonly = false;
5037                 if (!cache ||
5038                     start >= cache->key.objectid + cache->key.offset) {
5039                         if (cache)
5040                                 btrfs_put_block_group(cache);
5041                         cache = btrfs_lookup_block_group(fs_info, start);
5042                         BUG_ON(!cache); /* Logic error */
5043                 }
5044
5045                 len = cache->key.objectid + cache->key.offset - start;
5046                 len = min(len, end + 1 - start);
5047
5048                 if (start < cache->last_byte_to_unpin) {
5049                         len = min(len, cache->last_byte_to_unpin - start);
5050                         btrfs_add_free_space(cache, start, len);
5051                 }
5052
5053                 start += len;
5054                 space_info = cache->space_info;
5055
5056                 spin_lock(&space_info->lock);
5057                 spin_lock(&cache->lock);
5058                 cache->pinned -= len;
5059                 space_info->bytes_pinned -= len;
5060                 if (cache->ro) {
5061                         space_info->bytes_readonly += len;
5062                         readonly = true;
5063                 }
5064                 spin_unlock(&cache->lock);
5065                 if (!readonly && global_rsv->space_info == space_info) {
5066                         spin_lock(&global_rsv->lock);
5067                         if (!global_rsv->full) {
5068                                 len = min(len, global_rsv->size -
5069                                           global_rsv->reserved);
5070                                 global_rsv->reserved += len;
5071                                 space_info->bytes_may_use += len;
5072                                 if (global_rsv->reserved >= global_rsv->size)
5073                                         global_rsv->full = 1;
5074                         }
5075                         spin_unlock(&global_rsv->lock);
5076                 }
5077                 spin_unlock(&space_info->lock);
5078         }
5079
5080         if (cache)
5081                 btrfs_put_block_group(cache);
5082         return 0;
5083 }
5084
5085 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5086                                struct btrfs_root *root)
5087 {
5088         struct btrfs_fs_info *fs_info = root->fs_info;
5089         struct extent_io_tree *unpin;
5090         u64 start;
5091         u64 end;
5092         int ret;
5093
5094         if (trans->aborted)
5095                 return 0;
5096
5097         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5098                 unpin = &fs_info->freed_extents[1];
5099         else
5100                 unpin = &fs_info->freed_extents[0];
5101
5102         while (1) {
5103                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5104                                             EXTENT_DIRTY, NULL);
5105                 if (ret)
5106                         break;
5107
5108                 if (btrfs_test_opt(root, DISCARD))
5109                         ret = btrfs_discard_extent(root, start,
5110                                                    end + 1 - start, NULL);
5111
5112                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5113                 unpin_extent_range(root, start, end);
5114                 cond_resched();
5115         }
5116
5117         return 0;
5118 }
5119
5120 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5121                                 struct btrfs_root *root,
5122                                 u64 bytenr, u64 num_bytes, u64 parent,
5123                                 u64 root_objectid, u64 owner_objectid,
5124                                 u64 owner_offset, int refs_to_drop,
5125                                 struct btrfs_delayed_extent_op *extent_op)
5126 {
5127         struct btrfs_key key;
5128         struct btrfs_path *path;
5129         struct btrfs_fs_info *info = root->fs_info;
5130         struct btrfs_root *extent_root = info->extent_root;
5131         struct extent_buffer *leaf;
5132         struct btrfs_extent_item *ei;
5133         struct btrfs_extent_inline_ref *iref;
5134         int ret;
5135         int is_data;
5136         int extent_slot = 0;
5137         int found_extent = 0;
5138         int num_to_del = 1;
5139         u32 item_size;
5140         u64 refs;
5141
5142         path = btrfs_alloc_path();
5143         if (!path)
5144                 return -ENOMEM;
5145
5146         path->reada = 1;
5147         path->leave_spinning = 1;
5148
5149         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5150         BUG_ON(!is_data && refs_to_drop != 1);
5151
5152         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5153                                     bytenr, num_bytes, parent,
5154                                     root_objectid, owner_objectid,
5155                                     owner_offset);
5156         if (ret == 0) {
5157                 extent_slot = path->slots[0];
5158                 while (extent_slot >= 0) {
5159                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5160                                               extent_slot);
5161                         if (key.objectid != bytenr)
5162                                 break;
5163                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5164                             key.offset == num_bytes) {
5165                                 found_extent = 1;
5166                                 break;
5167                         }
5168                         if (path->slots[0] - extent_slot > 5)
5169                                 break;
5170                         extent_slot--;
5171                 }
5172 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5173                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5174                 if (found_extent && item_size < sizeof(*ei))
5175                         found_extent = 0;
5176 #endif
5177                 if (!found_extent) {
5178                         BUG_ON(iref);
5179                         ret = remove_extent_backref(trans, extent_root, path,
5180                                                     NULL, refs_to_drop,
5181                                                     is_data);
5182                         if (ret) {
5183                                 btrfs_abort_transaction(trans, extent_root, ret);
5184                                 goto out;
5185                         }
5186                         btrfs_release_path(path);
5187                         path->leave_spinning = 1;
5188
5189                         key.objectid = bytenr;
5190                         key.type = BTRFS_EXTENT_ITEM_KEY;
5191                         key.offset = num_bytes;
5192
5193                         ret = btrfs_search_slot(trans, extent_root,
5194                                                 &key, path, -1, 1);
5195                         if (ret) {
5196                                 printk(KERN_ERR "umm, got %d back from search"
5197                                        ", was looking for %llu\n", ret,
5198                                        (unsigned long long)bytenr);
5199                                 if (ret > 0)
5200                                         btrfs_print_leaf(extent_root,
5201                                                          path->nodes[0]);
5202                         }
5203                         if (ret < 0) {
5204                                 btrfs_abort_transaction(trans, extent_root, ret);
5205                                 goto out;
5206                         }
5207                         extent_slot = path->slots[0];
5208                 }
5209         } else if (ret == -ENOENT) {
5210                 btrfs_print_leaf(extent_root, path->nodes[0]);
5211                 WARN_ON(1);
5212                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5213                        "parent %llu root %llu  owner %llu offset %llu\n",
5214                        (unsigned long long)bytenr,
5215                        (unsigned long long)parent,
5216                        (unsigned long long)root_objectid,
5217                        (unsigned long long)owner_objectid,
5218                        (unsigned long long)owner_offset);
5219         } else {
5220                 btrfs_abort_transaction(trans, extent_root, ret);
5221                 goto out;
5222         }
5223
5224         leaf = path->nodes[0];
5225         item_size = btrfs_item_size_nr(leaf, extent_slot);
5226 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5227         if (item_size < sizeof(*ei)) {
5228                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5229                 ret = convert_extent_item_v0(trans, extent_root, path,
5230                                              owner_objectid, 0);
5231                 if (ret < 0) {
5232                         btrfs_abort_transaction(trans, extent_root, ret);
5233                         goto out;
5234                 }
5235
5236                 btrfs_release_path(path);
5237                 path->leave_spinning = 1;
5238
5239                 key.objectid = bytenr;
5240                 key.type = BTRFS_EXTENT_ITEM_KEY;
5241                 key.offset = num_bytes;
5242
5243                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5244                                         -1, 1);
5245                 if (ret) {
5246                         printk(KERN_ERR "umm, got %d back from search"
5247                                ", was looking for %llu\n", ret,
5248                                (unsigned long long)bytenr);
5249                         btrfs_print_leaf(extent_root, path->nodes[0]);
5250                 }
5251                 if (ret < 0) {
5252                         btrfs_abort_transaction(trans, extent_root, ret);
5253                         goto out;
5254                 }
5255
5256                 extent_slot = path->slots[0];
5257                 leaf = path->nodes[0];
5258                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5259         }
5260 #endif
5261         BUG_ON(item_size < sizeof(*ei));
5262         ei = btrfs_item_ptr(leaf, extent_slot,
5263                             struct btrfs_extent_item);
5264         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5265                 struct btrfs_tree_block_info *bi;
5266                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5267                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5268                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5269         }
5270
5271         refs = btrfs_extent_refs(leaf, ei);
5272         BUG_ON(refs < refs_to_drop);
5273         refs -= refs_to_drop;
5274
5275         if (refs > 0) {
5276                 if (extent_op)
5277                         __run_delayed_extent_op(extent_op, leaf, ei);
5278                 /*
5279                  * In the case of an inline back ref, the reference count
5280                  * will be updated by remove_extent_backref
5281                  */
5282                 if (iref) {
5283                         BUG_ON(!found_extent);
5284                 } else {
5285                         btrfs_set_extent_refs(leaf, ei, refs);
5286                         btrfs_mark_buffer_dirty(leaf);
5287                 }
5288                 if (found_extent) {
5289                         ret = remove_extent_backref(trans, extent_root, path,
5290                                                     iref, refs_to_drop,
5291                                                     is_data);
5292                         if (ret) {
5293                                 btrfs_abort_transaction(trans, extent_root, ret);
5294                                 goto out;
5295                         }
5296                 }
5297         } else {
5298                 if (found_extent) {
5299                         BUG_ON(is_data && refs_to_drop !=
5300                                extent_data_ref_count(root, path, iref));
5301                         if (iref) {
5302                                 BUG_ON(path->slots[0] != extent_slot);
5303                         } else {
5304                                 BUG_ON(path->slots[0] != extent_slot + 1);
5305                                 path->slots[0] = extent_slot;
5306                                 num_to_del = 2;
5307                         }
5308                 }
5309
5310                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5311                                       num_to_del);
5312                 if (ret) {
5313                         btrfs_abort_transaction(trans, extent_root, ret);
5314                         goto out;
5315                 }
5316                 btrfs_release_path(path);
5317
5318                 if (is_data) {
5319                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5320                         if (ret) {
5321                                 btrfs_abort_transaction(trans, extent_root, ret);
5322                                 goto out;
5323                         }
5324                 }
5325
5326                 ret = update_block_group(root, bytenr, num_bytes, 0);
5327                 if (ret) {
5328                         btrfs_abort_transaction(trans, extent_root, ret);
5329                         goto out;
5330                 }
5331         }
5332 out:
5333         btrfs_free_path(path);
5334         return ret;
5335 }
5336
5337 /*
5338  * when we free a block, it is possible (and likely) that we free the last
5339  * delayed ref for that extent as well.  This searches the delayed ref tree for
5340  * a given extent, and if there are no other delayed refs to be processed, it
5341  * removes it from the tree.
5342  */
5343 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5344                                       struct btrfs_root *root, u64 bytenr)
5345 {
5346         struct btrfs_delayed_ref_head *head;
5347         struct btrfs_delayed_ref_root *delayed_refs;
5348         struct btrfs_delayed_ref_node *ref;
5349         struct rb_node *node;
5350         int ret = 0;
5351
5352         delayed_refs = &trans->transaction->delayed_refs;
5353         spin_lock(&delayed_refs->lock);
5354         head = btrfs_find_delayed_ref_head(trans, bytenr);
5355         if (!head)
5356                 goto out;
5357
5358         node = rb_prev(&head->node.rb_node);
5359         if (!node)
5360                 goto out;
5361
5362         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5363
5364         /* there are still entries for this ref, we can't drop it */
5365         if (ref->bytenr == bytenr)
5366                 goto out;
5367
5368         if (head->extent_op) {
5369                 if (!head->must_insert_reserved)
5370                         goto out;
5371                 btrfs_free_delayed_extent_op(head->extent_op);
5372                 head->extent_op = NULL;
5373         }
5374
5375         /*
5376          * waiting for the lock here would deadlock.  If someone else has it
5377          * locked, they are already in the process of dropping it anyway
5378          */
5379         if (!mutex_trylock(&head->mutex))
5380                 goto out;
5381
5382         /*
5383          * at this point we have a head with no other entries.  Go
5384          * ahead and process it.
5385          */
5386         head->node.in_tree = 0;
5387         rb_erase(&head->node.rb_node, &delayed_refs->root);
5388
5389         delayed_refs->num_entries--;
5390
5391         /*
5392          * we don't take a ref on the node because we're removing it from the
5393          * tree, so we just steal the ref the tree was holding.
5394          */
5395         delayed_refs->num_heads--;
5396         if (list_empty(&head->cluster))
5397                 delayed_refs->num_heads_ready--;
5398
5399         list_del_init(&head->cluster);
5400         spin_unlock(&delayed_refs->lock);
5401
5402         BUG_ON(head->extent_op);
5403         if (head->must_insert_reserved)
5404                 ret = 1;
5405
5406         mutex_unlock(&head->mutex);
5407         btrfs_put_delayed_ref(&head->node);
5408         return ret;
5409 out:
5410         spin_unlock(&delayed_refs->lock);
5411         return 0;
5412 }
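
/*
 * Why the single rb_prev() check above is enough: delayed refs sort in
 * the rbtree by bytenr, with the head node ordered after every plain ref
 * for the same extent.  So if the head's predecessor has a different
 * bytenr, the head is the only remaining entry for this extent and can
 * be reaped.
 */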
5413
5414 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5415                            struct btrfs_root *root,
5416                            struct extent_buffer *buf,
5417                            u64 parent, int last_ref)
5418 {
5419         struct btrfs_block_group_cache *cache = NULL;
5420         int ret;
5421
5422         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5423                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5424                                         buf->start, buf->len,
5425                                         parent, root->root_key.objectid,
5426                                         btrfs_header_level(buf),
5427                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5428                 BUG_ON(ret); /* -ENOMEM */
5429         }
5430
5431         if (!last_ref)
5432                 return;
5433
5434         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5435
5436         if (btrfs_header_generation(buf) == trans->transid) {
5437                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5438                         ret = check_ref_cleanup(trans, root, buf->start);
5439                         if (!ret)
5440                                 goto out;
5441                 }
5442
5443                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5444                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5445                         goto out;
5446                 }
5447
5448                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5449
5450                 btrfs_add_free_space(cache, buf->start, buf->len);
5451                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5452         }
5453 out:
5454         /*
5455          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5456          * anymore.
5457          */
5458         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5459         btrfs_put_block_group(cache);
5460 }
5461
5462 /* Can return -ENOMEM */
5463 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5464                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5465                       u64 owner, u64 offset, int for_cow)
5466 {
5467         int ret;
5468         struct btrfs_fs_info *fs_info = root->fs_info;
5469
5470         /*
5471          * tree log blocks never actually go into the extent allocation
5472          * tree, just update pinning info and exit early.
5473          */
5474         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5475                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5476                 /* unlocks the pinned mutex */
5477                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5478                 ret = 0;
5479         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5480                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5481                                         num_bytes,
5482                                         parent, root_objectid, (int)owner,
5483                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5484         } else {
5485                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5486                                                 num_bytes,
5487                                                 parent, root_objectid, owner,
5488                                                 offset, BTRFS_DROP_DELAYED_REF,
5489                                                 NULL, for_cow);
5490         }
5491         return ret;
5492 }
5493
5494 static u64 stripe_align(struct btrfs_root *root, u64 val)
5495 {
5496         u64 mask = ((u64)root->stripesize - 1);
5497         u64 ret = (val + mask) & ~mask;
5498         return ret;
5499 }
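
/*
 * Worked example for the mask arithmetic above (values are illustrative):
 * with a 64KiB stripesize the mask is 0xffff, so
 *
 *   stripe_align(root, 0x12345) == (0x12345 + 0xffff) & ~0xffff == 0x20000
 *   stripe_align(root, 0x20000) == 0x20000 (already aligned)
 *
 * The add-and-mask trick only rounds up correctly because the stripesize
 * is a power of two.
 */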
5500
5501 /*
5502  * when we wait for progress in the block group caching, it's because
5503  * our allocation attempt failed at least once.  So, we must sleep
5504  * and let some progress happen before we try again.
5505  *
5506  * This function will sleep at least once waiting for new free space to
5507  * show up, and then it will check the block group free space numbers
5508  * for our min num_bytes.  Another option is to have it go ahead
5509  * and look in the rbtree for a free extent of a given size, but this
5510  * is a good start.
5511  */
5512 static noinline int
5513 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5514                                 u64 num_bytes)
5515 {
5516         struct btrfs_caching_control *caching_ctl;
5517         DEFINE_WAIT(wait);
5518
5519         caching_ctl = get_caching_control(cache);
5520         if (!caching_ctl)
5521                 return 0;
5522
5523         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5524                    (cache->free_space_ctl->free_space >= num_bytes));
5525
5526         put_caching_control(caching_ctl);
5527         return 0;
5528 }
5529
5530 static noinline int
5531 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5532 {
5533         struct btrfs_caching_control *caching_ctl;
5534         DEFINE_WAIT(wait);
5535
5536         caching_ctl = get_caching_control(cache);
5537         if (!caching_ctl)
5538                 return 0;
5539
5540         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5541
5542         put_caching_control(caching_ctl);
5543         return 0;
5544 }
5545
5546 int __get_raid_index(u64 flags)
5547 {
5548         int index;
5549
5550         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5551                 index = 0;
5552         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5553                 index = 1;
5554         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5555                 index = 2;
5556         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5557                 index = 3;
5558         else
5559                 index = 4;
5560
5561         return index;
5562 }
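
/*
 * Example (illustrative): a block group with flags
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 lands on
 * space_info->block_groups[1], while a single-profile block group with
 * no RAID bits set falls through to index 4.  find_free_extent() scans
 * one index at a time and bumps it on failure, so this mapping also
 * determines the order in which RAID profiles are tried.
 */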
5563
5564 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5565 {
5566         return __get_raid_index(cache->flags);
5567 }
5568
5569 enum btrfs_loop_type {
5570         LOOP_CACHING_NOWAIT = 0,
5571         LOOP_CACHING_WAIT = 1,
5572         LOOP_ALLOC_CHUNK = 2,
5573         LOOP_NO_EMPTY_SIZE = 3,
5574 };
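
/*
 * find_free_extent() starts at LOOP_CACHING_NOWAIT and moves through
 * these states in order: each full pass over the block groups that fails
 * to reserve space bumps the loop counter and relaxes one constraint
 * (wait on caching, force a chunk allocation, finally drop empty_size
 * and empty_cluster) before the search gives up with -ENOSPC.
 */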
5575
5576 /*
5577  * walks the btree of allocated extents and finds a hole of a given size.
5578  * The key ins is changed to record the hole:
5579  * ins->objectid == block start
5580  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5581  * ins->offset == number of blocks
5582  * Any available blocks before search_start are skipped.
5583  */
5584 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5585                                      struct btrfs_root *orig_root,
5586                                      u64 num_bytes, u64 empty_size,
5587                                      u64 hint_byte, struct btrfs_key *ins,
5588                                      u64 data)
5589 {
5590         int ret = 0;
5591         struct btrfs_root *root = orig_root->fs_info->extent_root;
5592         struct btrfs_free_cluster *last_ptr = NULL;
5593         struct btrfs_block_group_cache *block_group = NULL;
5594         struct btrfs_block_group_cache *used_block_group;
5595         u64 search_start = 0;
5596         int empty_cluster = 2 * 1024 * 1024;
5597         struct btrfs_space_info *space_info;
5598         int loop = 0;
5599         int index = __get_raid_index(data);
5600         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5601                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5602         bool found_uncached_bg = false;
5603         bool failed_cluster_refill = false;
5604         bool failed_alloc = false;
5605         bool use_cluster = true;
5606         bool have_caching_bg = false;
5607
5608         WARN_ON(num_bytes < root->sectorsize);
5609         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5610         ins->objectid = 0;
5611         ins->offset = 0;
5612
5613         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5614
5615         space_info = __find_space_info(root->fs_info, data);
5616         if (!space_info) {
5617                 printk(KERN_ERR "No space info for %llu\n", (unsigned long long)data);
5618                 return -ENOSPC;
5619         }
5620
5621         /*
5622          * If the space info is for both data and metadata it means we have a
5623          * small filesystem and we can't use the clustering stuff.
5624          */
5625         if (btrfs_mixed_space_info(space_info))
5626                 use_cluster = false;
5627
5628         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5629                 last_ptr = &root->fs_info->meta_alloc_cluster;
5630                 if (!btrfs_test_opt(root, SSD))
5631                         empty_cluster = 64 * 1024;
5632         }
5633
5634         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5635             btrfs_test_opt(root, SSD)) {
5636                 last_ptr = &root->fs_info->data_alloc_cluster;
5637         }
5638
5639         if (last_ptr) {
5640                 spin_lock(&last_ptr->lock);
5641                 if (last_ptr->block_group)
5642                         hint_byte = last_ptr->window_start;
5643                 spin_unlock(&last_ptr->lock);
5644         }
5645
5646         search_start = max(search_start, first_logical_byte(root, 0));
5647         search_start = max(search_start, hint_byte);
5648
5649         if (!last_ptr)
5650                 empty_cluster = 0;
5651
5652         if (search_start == hint_byte) {
5653                 block_group = btrfs_lookup_block_group(root->fs_info,
5654                                                        search_start);
5655                 used_block_group = block_group;
5656                 /*
5657                  * we don't want to use the block group if it doesn't match our
5658                  * allocation bits, or if it's not cached.
5659                  *
5660                  * However, if we are re-searching with an ideal block group
5661                  * picked out then we don't care that the block group is cached.
5662                  */
5663                 if (block_group && block_group_bits(block_group, data) &&
5664                     block_group->cached != BTRFS_CACHE_NO) {
5665                         down_read(&space_info->groups_sem);
5666                         if (list_empty(&block_group->list) ||
5667                             block_group->ro) {
5668                                 /*
5669                                  * someone is removing this block group,
5670                                  * we can't jump into the have_block_group
5671                                  * target because our list pointers are not
5672                                  * valid
5673                                  */
5674                                 btrfs_put_block_group(block_group);
5675                                 up_read(&space_info->groups_sem);
5676                         } else {
5677                                 index = get_block_group_index(block_group);
5678                                 goto have_block_group;
5679                         }
5680                 } else if (block_group) {
5681                         btrfs_put_block_group(block_group);
5682                 }
5683         }
5684 search:
5685         have_caching_bg = false;
5686         down_read(&space_info->groups_sem);
5687         list_for_each_entry(block_group, &space_info->block_groups[index],
5688                             list) {
5689                 u64 offset;
5690                 int cached;
5691
5692                 used_block_group = block_group;
5693                 btrfs_get_block_group(block_group);
5694                 search_start = block_group->key.objectid;
5695
5696                 /*
5697                  * this can happen if we end up cycling through all the
5698                  * raid types, but we want to make sure we only allocate
5699                  * for the proper type.
5700                  */
5701                 if (!block_group_bits(block_group, data)) {
5702                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5703                                     BTRFS_BLOCK_GROUP_RAID1 |
5704                                     BTRFS_BLOCK_GROUP_RAID10;
5705
5706                         /*
5707                          * if they asked for extra copies and this block group
5708                          * doesn't provide them, bail.  This does allow us to
5709                          * fill raid0 from raid1.
5710                          */
5711                         if ((data & extra) && !(block_group->flags & extra))
5712                                 goto loop;
5713                 }
5714
5715 have_block_group:
5716                 cached = block_group_cache_done(block_group);
5717                 if (unlikely(!cached)) {
5718                         found_uncached_bg = true;
5719                         ret = cache_block_group(block_group, 0);
5720                         BUG_ON(ret < 0);
5721                         ret = 0;
5722                 }
5723
5724                 if (unlikely(block_group->ro))
5725                         goto loop;
5726
5727                 /*
5728                  * OK, we want to try and use the cluster allocator, so
5729                  * let's look there
5730                  */
5731                 if (last_ptr) {
5732                         /*
5733                          * the refill lock keeps out other
5734                          * people trying to start a new cluster
5735                          */
5736                         spin_lock(&last_ptr->refill_lock);
5737                         used_block_group = last_ptr->block_group;
5738                         if (used_block_group != block_group &&
5739                             (!used_block_group ||
5740                              used_block_group->ro ||
5741                              !block_group_bits(used_block_group, data))) {
5742                                 used_block_group = block_group;
5743                                 goto refill_cluster;
5744                         }
5745
5746                         if (used_block_group != block_group)
5747                                 btrfs_get_block_group(used_block_group);
5748
5749                         offset = btrfs_alloc_from_cluster(used_block_group,
5750                           last_ptr, num_bytes, used_block_group->key.objectid);
5751                         if (offset) {
5752                                 /* we have a block, we're done */
5753                                 spin_unlock(&last_ptr->refill_lock);
5754                                 trace_btrfs_reserve_extent_cluster(root,
5755                                         block_group, search_start, num_bytes);
5756                                 goto checks;
5757                         }
5758
5759                         WARN_ON(last_ptr->block_group != used_block_group);
5760                         if (used_block_group != block_group) {
5761                                 btrfs_put_block_group(used_block_group);
5762                                 used_block_group = block_group;
5763                         }
5764 refill_cluster:
5765                         BUG_ON(used_block_group != block_group);
5766                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5767                          * set up a new cluster, so let's just skip it
5768                          * and let the allocator find whatever block
5769                          * it can find.  If we reach this point, we
5770                          * will have tried the cluster allocator
5771                          * plenty of times and not have found
5772                          * anything, so we are likely way too
5773                          * fragmented for the clustering stuff to find
5774                          * anything.
5775                          *
5776                          * However, if the cluster is taken from the
5777                          * current block group, release the cluster
5778                          * first, so that we stand a better chance of
5779                          * succeeding in the unclustered
5780                          * allocation.  */
5781                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5782                             last_ptr->block_group != block_group) {
5783                                 spin_unlock(&last_ptr->refill_lock);
5784                                 goto unclustered_alloc;
5785                         }
5786
5787                         /*
5788                          * this cluster didn't work out, free it and
5789                          * start over
5790                          */
5791                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5792
5793                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5794                                 spin_unlock(&last_ptr->refill_lock);
5795                                 goto unclustered_alloc;
5796                         }
5797
5798                         /* allocate a cluster in this block group */
5799                         ret = btrfs_find_space_cluster(trans, root,
5800                                                block_group, last_ptr,
5801                                                search_start, num_bytes,
5802                                                empty_cluster + empty_size);
5803                         if (ret == 0) {
5804                                 /*
5805                                  * now pull our allocation out of this
5806                                  * cluster
5807                                  */
5808                                 offset = btrfs_alloc_from_cluster(block_group,
5809                                                   last_ptr, num_bytes,
5810                                                   search_start);
5811                                 if (offset) {
5812                                         /* we found one, proceed */
5813                                         spin_unlock(&last_ptr->refill_lock);
5814                                         trace_btrfs_reserve_extent_cluster(root,
5815                                                 block_group, search_start,
5816                                                 num_bytes);
5817                                         goto checks;
5818                                 }
5819                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5820                                    && !failed_cluster_refill) {
5821                                 spin_unlock(&last_ptr->refill_lock);
5822
5823                                 failed_cluster_refill = true;
5824                                 wait_block_group_cache_progress(block_group,
5825                                        num_bytes + empty_cluster + empty_size);
5826                                 goto have_block_group;
5827                         }
5828
5829                         /*
5830                          * at this point we either didn't find a cluster
5831                          * or we weren't able to allocate a block from our
5832                          * cluster.  Free the cluster we've been trying
5833                          * to use, and go to the next block group
5834                          */
5835                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5836                         spin_unlock(&last_ptr->refill_lock);
5837                         goto loop;
5838                 }
5839
5840 unclustered_alloc:
5841                 spin_lock(&block_group->free_space_ctl->tree_lock);
5842                 if (cached &&
5843                     block_group->free_space_ctl->free_space <
5844                     num_bytes + empty_cluster + empty_size) {
5845                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5846                         goto loop;
5847                 }
5848                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5849
5850                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5851                                                     num_bytes, empty_size);
5852                 /*
5853                  * If we didn't find a chunk, and we haven't failed on this
5854                  * block group before, and this block group is in the middle of
5855                  * caching and we are ok with waiting, then go ahead and wait
5856                  * for progress to be made, and set failed_alloc to true.
5857                  *
5858                  * If failed_alloc is true then we've already waited on this
5859                  * block group once and should move on to the next block group.
5860                  */
5861                 if (!offset && !failed_alloc && !cached &&
5862                     loop > LOOP_CACHING_NOWAIT) {
5863                         wait_block_group_cache_progress(block_group,
5864                                                 num_bytes + empty_size);
5865                         failed_alloc = true;
5866                         goto have_block_group;
5867                 } else if (!offset) {
5868                         if (!cached)
5869                                 have_caching_bg = true;
5870                         goto loop;
5871                 }
5872 checks:
5873                 search_start = stripe_align(root, offset);
5874
5875                 /* move on to the next group */
5876                 if (search_start + num_bytes >
5877                     used_block_group->key.objectid + used_block_group->key.offset) {
5878                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5879                         goto loop;
5880                 }
5881
5882                 if (offset < search_start)
5883                         btrfs_add_free_space(used_block_group, offset,
5884                                              search_start - offset);
5885                 BUG_ON(offset > search_start);
5886
5887                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5888                                                   alloc_type);
5889                 if (ret == -EAGAIN) {
5890                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5891                         goto loop;
5892                 }
5893
5894                 /* we are all good, let's return */
5895                 ins->objectid = search_start;
5896                 ins->offset = num_bytes;
5897
5898                 trace_btrfs_reserve_extent(orig_root, block_group,
5899                                            search_start, num_bytes);
5900                 if (used_block_group != block_group)
5901                         btrfs_put_block_group(used_block_group);
5902                 btrfs_put_block_group(block_group);
5903                 break;
5904 loop:
5905                 failed_cluster_refill = false;
5906                 failed_alloc = false;
5907                 BUG_ON(index != get_block_group_index(block_group));
5908                 if (used_block_group != block_group)
5909                         btrfs_put_block_group(used_block_group);
5910                 btrfs_put_block_group(block_group);
5911         }
5912         up_read(&space_info->groups_sem);
5913
5914         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5915                 goto search;
5916
5917         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5918                 goto search;
5919
5920         /*
5921          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5922          *                      caching kthreads as we move along
5923          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5924          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5925          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5926          *                      again
5927          */
5928         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5929                 index = 0;
5930                 loop++;
5931                 if (loop == LOOP_ALLOC_CHUNK) {
5932                         ret = do_chunk_alloc(trans, root, data,
5933                                              CHUNK_ALLOC_FORCE);
5934                         /*
5935                          * Do not bail out on ENOSPC since we
5936                          * can do more things.
5937                          */
5938                         if (ret < 0 && ret != -ENOSPC) {
5939                                 btrfs_abort_transaction(trans,
5940                                                         root, ret);
5941                                 goto out;
5942                         }
5943                 }
5944
5945                 if (loop == LOOP_NO_EMPTY_SIZE) {
5946                         empty_size = 0;
5947                         empty_cluster = 0;
5948                 }
5949
5950                 goto search;
5951         } else if (!ins->objectid) {
5952                 ret = -ENOSPC;
5953         } else {
5954                 ret = 0;
5955         }
5956 out:
5957
5958         return ret;
5959 }
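
/*
 * Control-flow map for find_free_extent() above (documentation only):
 *
 *   search:            walk the block groups of the current raid index
 *   have_block_group:  kick off/await caching, then try the cluster
 *   refill_cluster:    try to build a new cluster in this block group
 *   unclustered_alloc: fall back to the block group's free space ctl
 *   checks:            align the result, verify it fits, reserve it
 *   loop:              drop refs and move on to the next block group
 *
 * On success it breaks out with ins->objectid/ins->offset filled in;
 * otherwise the LOOP_* ladder relaxes constraints and restarts at search.
 */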
5960
5961 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5962                             int dump_block_groups)
5963 {
5964         struct btrfs_block_group_cache *cache;
5965         int index = 0;
5966
5967         spin_lock(&info->lock);
5968         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5969                (unsigned long long)info->flags,
5970                (unsigned long long)(info->total_bytes - info->bytes_used -
5971                                     info->bytes_pinned - info->bytes_reserved -
5972                                     info->bytes_readonly),
5973                (info->full) ? "" : "not ");
5974         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5975                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5976                (unsigned long long)info->total_bytes,
5977                (unsigned long long)info->bytes_used,
5978                (unsigned long long)info->bytes_pinned,
5979                (unsigned long long)info->bytes_reserved,
5980                (unsigned long long)info->bytes_may_use,
5981                (unsigned long long)info->bytes_readonly);
5982         spin_unlock(&info->lock);
5983
5984         if (!dump_block_groups)
5985                 return;
5986
5987         down_read(&info->groups_sem);
5988 again:
5989         list_for_each_entry(cache, &info->block_groups[index], list) {
5990                 spin_lock(&cache->lock);
5991                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
5992                        (unsigned long long)cache->key.objectid,
5993                        (unsigned long long)cache->key.offset,
5994                        (unsigned long long)btrfs_block_group_used(&cache->item),
5995                        (unsigned long long)cache->pinned,
5996                        (unsigned long long)cache->reserved,
5997                        cache->ro ? "[readonly]" : "");
5998                 btrfs_dump_free_space(cache, bytes);
5999                 spin_unlock(&cache->lock);
6000         }
6001         if (++index < BTRFS_NR_RAID_TYPES)
6002                 goto again;
6003         up_read(&info->groups_sem);
6004 }
6005
6006 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6007                          struct btrfs_root *root,
6008                          u64 num_bytes, u64 min_alloc_size,
6009                          u64 empty_size, u64 hint_byte,
6010                          struct btrfs_key *ins, u64 data)
6011 {
6012         bool final_tried = false;
6013         int ret;
6014
6015         data = btrfs_get_alloc_profile(root, data);
6016 again:
6017         WARN_ON(num_bytes < root->sectorsize);
6018         ret = find_free_extent(trans, root, num_bytes, empty_size,
6019                                hint_byte, ins, data);
6020
6021         if (ret == -ENOSPC) {
6022                 if (!final_tried) {
6023                         num_bytes = num_bytes >> 1;
6024                         num_bytes = num_bytes & ~(root->sectorsize - 1);
6025                         num_bytes = max(num_bytes, min_alloc_size);
6026                         if (num_bytes == min_alloc_size)
6027                                 final_tried = true;
6028                         goto again;
6029                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6030                         struct btrfs_space_info *sinfo;
6031
6032                         sinfo = __find_space_info(root->fs_info, data);
6033                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
6034                                "wanted %llu\n", (unsigned long long)data,
6035                                (unsigned long long)num_bytes);
6036                         if (sinfo)
6037                                 dump_space_info(sinfo, num_bytes, 1);
6038                 }
6039         }
6040
6041         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6042
6043         return ret;
6044 }
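
/*
 * Hypothetical helper (not part of btrfs): one step of the -ENOSPC
 * shrink loop above, pulled out for illustration.  With a 4KiB
 * sectorsize and a 64KiB min_alloc_size, a 520KiB request retries as
 * 260KiB -> 128KiB (130KiB rounded down to a sector boundary) -> 64KiB,
 * after which final_tried is set and the last failure is returned.
 */
static inline u64 shrink_alloc_size_example(u64 num_bytes, u64 sectorsize,
                                            u64 min_alloc_size)
{
        num_bytes >>= 1;                        /* halve the request */
        num_bytes &= ~(sectorsize - 1);         /* round down to a sector */
        return max(num_bytes, min_alloc_size);  /* never below the minimum */
}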
6045
6046 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6047                                         u64 start, u64 len, int pin)
6048 {
6049         struct btrfs_block_group_cache *cache;
6050         int ret = 0;
6051
6052         cache = btrfs_lookup_block_group(root->fs_info, start);
6053         if (!cache) {
6054                 printk(KERN_ERR "Unable to find block group for %llu\n",
6055                        (unsigned long long)start);
6056                 return -ENOSPC;
6057         }
6058
6059         if (btrfs_test_opt(root, DISCARD))
6060                 ret = btrfs_discard_extent(root, start, len, NULL);
6061
6062         if (pin)
6063                 pin_down_extent(root, cache, start, len, 1);
6064         else {
6065                 btrfs_add_free_space(cache, start, len);
6066                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6067         }
6068         btrfs_put_block_group(cache);
6069
6070         trace_btrfs_reserved_extent_free(root, start, len);
6071
6072         return ret;
6073 }
6074
6075 int btrfs_free_reserved_extent(struct btrfs_root *root,
6076                                         u64 start, u64 len)
6077 {
6078         return __btrfs_free_reserved_extent(root, start, len, 0);
6079 }
6080
6081 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6082                                        u64 start, u64 len)
6083 {
6084         return __btrfs_free_reserved_extent(root, start, len, 1);
6085 }
6086
6087 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6088                                       struct btrfs_root *root,
6089                                       u64 parent, u64 root_objectid,
6090                                       u64 flags, u64 owner, u64 offset,
6091                                       struct btrfs_key *ins, int ref_mod)
6092 {
6093         int ret;
6094         struct btrfs_fs_info *fs_info = root->fs_info;
6095         struct btrfs_extent_item *extent_item;
6096         struct btrfs_extent_inline_ref *iref;
6097         struct btrfs_path *path;
6098         struct extent_buffer *leaf;
6099         int type;
6100         u32 size;
6101
6102         if (parent > 0)
6103                 type = BTRFS_SHARED_DATA_REF_KEY;
6104         else
6105                 type = BTRFS_EXTENT_DATA_REF_KEY;
6106
6107         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6108
6109         path = btrfs_alloc_path();
6110         if (!path)
6111                 return -ENOMEM;
6112
6113         path->leave_spinning = 1;
6114         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6115                                       ins, size);
6116         if (ret) {
6117                 btrfs_free_path(path);
6118                 return ret;
6119         }
6120
6121         leaf = path->nodes[0];
6122         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6123                                      struct btrfs_extent_item);
6124         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6125         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6126         btrfs_set_extent_flags(leaf, extent_item,
6127                                flags | BTRFS_EXTENT_FLAG_DATA);
6128
6129         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6130         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6131         if (parent > 0) {
6132                 struct btrfs_shared_data_ref *ref;
6133                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6134                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6135                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6136         } else {
6137                 struct btrfs_extent_data_ref *ref;
6138                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6139                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6140                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6141                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6142                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6143         }
6144
6145         btrfs_mark_buffer_dirty(path->nodes[0]);
6146         btrfs_free_path(path);
6147
6148         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6149         if (ret) { /* -ENOENT, logic error */
6150                 printk(KERN_ERR "btrfs update block group failed for %llu "
6151                        "%llu\n", (unsigned long long)ins->objectid,
6152                        (unsigned long long)ins->offset);
6153                 BUG();
6154         }
6155         return ret;
6156 }
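
/*
 * Resulting item layout (for reference): a shared ref (parent > 0) is
 *
 *   [ btrfs_extent_item | iref: SHARED_DATA_REF_KEY, offset = parent |
 *     btrfs_shared_data_ref.count ]
 *
 * while a keyed back ref (parent == 0) is
 *
 *   [ btrfs_extent_item | iref: EXTENT_DATA_REF_KEY |
 *     btrfs_extent_data_ref: root/objectid/offset/count ]
 *
 * which is why "size" above depends on the inline ref type.
 */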
6157
6158 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6159                                      struct btrfs_root *root,
6160                                      u64 parent, u64 root_objectid,
6161                                      u64 flags, struct btrfs_disk_key *key,
6162                                      int level, struct btrfs_key *ins)
6163 {
6164         int ret;
6165         struct btrfs_fs_info *fs_info = root->fs_info;
6166         struct btrfs_extent_item *extent_item;
6167         struct btrfs_tree_block_info *block_info;
6168         struct btrfs_extent_inline_ref *iref;
6169         struct btrfs_path *path;
6170         struct extent_buffer *leaf;
6171         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6172
6173         path = btrfs_alloc_path();
6174         if (!path)
6175                 return -ENOMEM;
6176
6177         path->leave_spinning = 1;
6178         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6179                                       ins, size);
6180         if (ret) {
6181                 btrfs_free_path(path);
6182                 return ret;
6183         }
6184
6185         leaf = path->nodes[0];
6186         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6187                                      struct btrfs_extent_item);
6188         btrfs_set_extent_refs(leaf, extent_item, 1);
6189         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6190         btrfs_set_extent_flags(leaf, extent_item,
6191                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6192         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6193
6194         btrfs_set_tree_block_key(leaf, block_info, key);
6195         btrfs_set_tree_block_level(leaf, block_info, level);
6196
6197         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6198         if (parent > 0) {
6199                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6200                 btrfs_set_extent_inline_ref_type(leaf, iref,
6201                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6202                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6203         } else {
6204                 btrfs_set_extent_inline_ref_type(leaf, iref,
6205                                                  BTRFS_TREE_BLOCK_REF_KEY);
6206                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6207         }
6208
6209         btrfs_mark_buffer_dirty(leaf);
6210         btrfs_free_path(path);
6211
6212         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6213         if (ret) { /* -ENOENT, logic error */
6214                 printk(KERN_ERR "btrfs update block group failed for %llu "
6215                        "%llu\n", (unsigned long long)ins->objectid,
6216                        (unsigned long long)ins->offset);
6217                 BUG();
6218         }
6219         return ret;
6220 }
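
/*
 * Compared to alloc_reserved_file_extent(), tree blocks insert an extra
 * btrfs_tree_block_info (the first key and the level) between the extent
 * item and the inline ref, hence the additional sizeof(*block_info) in
 * "size" above.  Data extents have no such section.
 */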
6221
6222 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6223                                      struct btrfs_root *root,
6224                                      u64 root_objectid, u64 owner,
6225                                      u64 offset, struct btrfs_key *ins)
6226 {
6227         int ret;
6228
6229         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6230
6231         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6232                                          ins->offset, 0,
6233                                          root_objectid, owner, offset,
6234                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6235         return ret;
6236 }
6237
6238 /*
6239  * this is used by the tree logging recovery code.  It records that
6240  * an extent has been allocated and makes sure to clear the free
6241  * space cache bits as well
6242  */
6243 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6244                                    struct btrfs_root *root,
6245                                    u64 root_objectid, u64 owner, u64 offset,
6246                                    struct btrfs_key *ins)
6247 {
6248         int ret;
6249         struct btrfs_block_group_cache *block_group;
6250         struct btrfs_caching_control *caching_ctl;
6251         u64 start = ins->objectid;
6252         u64 num_bytes = ins->offset;
6253
6254         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6255         cache_block_group(block_group, 0);
6256         caching_ctl = get_caching_control(block_group);
6257
6258         if (!caching_ctl) {
6259                 BUG_ON(!block_group_cache_done(block_group));
6260                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6261                 BUG_ON(ret); /* -ENOMEM */
6262         } else {
6263                 mutex_lock(&caching_ctl->mutex);
6264
6265                 if (start >= caching_ctl->progress) {
6266                         ret = add_excluded_extent(root, start, num_bytes);
6267                         BUG_ON(ret); /* -ENOMEM */
6268                 } else if (start + num_bytes <= caching_ctl->progress) {
6269                         ret = btrfs_remove_free_space(block_group,
6270                                                       start, num_bytes);
6271                         BUG_ON(ret); /* -ENOMEM */
6272                 } else {
6273                         num_bytes = caching_ctl->progress - start;
6274                         ret = btrfs_remove_free_space(block_group,
6275                                                       start, num_bytes);
6276                         BUG_ON(ret); /* -ENOMEM */
6277
6278                         start = caching_ctl->progress;
6279                         num_bytes = ins->objectid + ins->offset -
6280                                     caching_ctl->progress;
6281                         ret = add_excluded_extent(root, start, num_bytes);
6282                         BUG_ON(ret); /* -ENOMEM */
6283                 }
6284
6285                 mutex_unlock(&caching_ctl->mutex);
6286                 put_caching_control(caching_ctl);
6287         }
6288
6289         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6290                                           RESERVE_ALLOC_NO_ACCOUNT);
6291         BUG_ON(ret); /* logic error */
6292         btrfs_put_block_group(block_group);
6293         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6294                                          0, owner, offset, ins, 1);
6295         return ret;
6296 }
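
/*
 * Worked example for the caching_ctl->progress cases above (numbers are
 * illustrative): say the logged extent is [1M, 1M + 256K) and the caching
 * thread has reached progress == 1M + 64K.  Neither the fully-scanned nor
 * the fully-unscanned case applies, so the code splits the range:
 * [1M, 1M + 64K) is removed from the free space cache directly, and
 * [1M + 64K, 1M + 256K) is marked excluded so the cacher will skip it.
 */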
6297
6298 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6299                                             struct btrfs_root *root,
6300                                             u64 bytenr, u32 blocksize,
6301                                             int level)
6302 {
6303         struct extent_buffer *buf;
6304
6305         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6306         if (!buf)
6307                 return ERR_PTR(-ENOMEM);
6308         btrfs_set_header_generation(buf, trans->transid);
6309         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6310         btrfs_tree_lock(buf);
6311         clean_tree_block(trans, root, buf);
6312         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6313
6314         btrfs_set_lock_blocking(buf);
6315         btrfs_set_buffer_uptodate(buf);
6316
6317         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6318                 /*
6319                  * we allow two log transactions at a time, so use different
6320                  * EXTENT bits to differentiate their dirty pages.
6321                  */
6322                 if (root->log_transid % 2 == 0)
6323                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6324                                         buf->start + buf->len - 1, GFP_NOFS);
6325                 else
6326                         set_extent_new(&root->dirty_log_pages, buf->start,
6327                                         buf->start + buf->len - 1, GFP_NOFS);
6328         } else {
6329                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6330                          buf->start + buf->len - 1, GFP_NOFS);
6331         }
6332         trans->blocks_used++;
6333         /* this returns a buffer locked for blocking */
6334         return buf;
6335 }
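
/*
 * Example of the log transid parity above: log transaction 6 marks its
 * blocks EXTENT_DIRTY in dirty_log_pages while transaction 7 marks its
 * own EXTENT_NEW, so two in-flight log commits can each write back and
 * clear only their own blocks.
 */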
6336
6337 static struct btrfs_block_rsv *
6338 use_block_rsv(struct btrfs_trans_handle *trans,
6339               struct btrfs_root *root, u32 blocksize)
6340 {
6341         struct btrfs_block_rsv *block_rsv;
6342         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6343         int ret;
6344
6345         block_rsv = get_block_rsv(trans, root);
6346
6347         if (block_rsv->size == 0) {
6348                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6349                                              BTRFS_RESERVE_NO_FLUSH);
6350                 /*
6351                  * If we couldn't reserve metadata bytes try and use some from
6352                  * the global reserve.
6353                  */
6354                 if (ret && block_rsv != global_rsv) {
6355                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6356                         if (!ret)
6357                                 return global_rsv;
6358                         return ERR_PTR(ret);
6359                 } else if (ret) {
6360                         return ERR_PTR(ret);
6361                 }
6362                 return block_rsv;
6363         }
6364
6365         ret = block_rsv_use_bytes(block_rsv, blocksize);
6366         if (!ret)
6367                 return block_rsv;
6368         if (ret && !block_rsv->failfast) {
6369                 static DEFINE_RATELIMIT_STATE(_rs,
6370                                 DEFAULT_RATELIMIT_INTERVAL,
6371                                 /*DEFAULT_RATELIMIT_BURST*/ 2);
6372                 if (__ratelimit(&_rs))
6373                         WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
6374                              ret);
6375                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6376                                              BTRFS_RESERVE_NO_FLUSH);
6377                 if (!ret) {
6378                         return block_rsv;
6379                 } else if (ret && block_rsv != global_rsv) {
6380                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6381                         if (!ret)
6382                                 return global_rsv;
6383                 }
6384         }
6385
6386         return ERR_PTR(-ENOSPC);
6387 }
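
/*
 * Reservation fallback order implemented above: use bytes already in the
 * transaction's block_rsv first; if that fails (and the rsv is not
 * failfast), retry reserve_metadata_bytes() without flushing; as a last
 * resort dip into the global reserve.  Any remaining failure surfaces to
 * the caller as an ERR_PTR.
 */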
6388
6389 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6390                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6391 {
6392         block_rsv_add_bytes(block_rsv, blocksize, 0);
6393         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6394 }
6395
6396 /*
6397  * finds a free extent and does all the dirty work required for allocation.
6398  * It returns the key for the extent through ins, and a tree buffer for
6399  * the first block of the extent through buf.
6400  *
6401  * returns the tree buffer or an ERR_PTR on failure.
6402  */
6403 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6404                                         struct btrfs_root *root, u32 blocksize,
6405                                         u64 parent, u64 root_objectid,
6406                                         struct btrfs_disk_key *key, int level,
6407                                         u64 hint, u64 empty_size)
6408 {
6409         struct btrfs_key ins;
6410         struct btrfs_block_rsv *block_rsv;
6411         struct extent_buffer *buf;
6412         u64 flags = 0;
6413         int ret;
6414
6415
6416         block_rsv = use_block_rsv(trans, root, blocksize);
6417         if (IS_ERR(block_rsv))
6418                 return ERR_CAST(block_rsv);
6419
6420         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6421                                    empty_size, hint, &ins, 0);
6422         if (ret) {
6423                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6424                 return ERR_PTR(ret);
6425         }
6426
6427         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6428                                     blocksize, level);
6429         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6430
6431         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6432                 if (parent == 0)
6433                         parent = ins.objectid;
6434                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6435         } else
6436                 BUG_ON(parent > 0);
6437
6438         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6439                 struct btrfs_delayed_extent_op *extent_op;
6440                 extent_op = btrfs_alloc_delayed_extent_op();
6441                 BUG_ON(!extent_op); /* -ENOMEM */
6442                 if (key)
6443                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6444                 else
6445                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6446                 extent_op->flags_to_set = flags;
6447                 extent_op->update_key = 1;
6448                 extent_op->update_flags = 1;
6449                 extent_op->is_data = 0;
6450
6451                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6452                                         ins.objectid,
6453                                         ins.offset, parent, root_objectid,
6454                                         level, BTRFS_ADD_DELAYED_EXTENT,
6455                                         extent_op, 0);
6456                 BUG_ON(ret); /* -ENOMEM */
6457         }
6458         return buf;
6459 }
6460
6461 struct walk_control {
6462         u64 refs[BTRFS_MAX_LEVEL];
6463         u64 flags[BTRFS_MAX_LEVEL];
6464         struct btrfs_key update_progress;
6465         int stage;
6466         int level;
6467         int shared_level;
6468         int update_ref;
6469         int keep_locks;
6470         int reada_slot;
6471         int reada_count;
6472         int for_reloc;
6473 };
6474
6475 #define DROP_REFERENCE  1
6476 #define UPDATE_BACKREF  2
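
/*
 * The walk normally runs in DROP_REFERENCE, dropping our reference on
 * each block on the way down.  When do_walk_down() meets a shared block
 * whose subtree still needs its back refs rewritten, it switches to
 * UPDATE_BACKREF for that subtree and records the level in
 * wc->shared_level before resuming the drop.
 */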
6477
6478 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6479                                      struct btrfs_root *root,
6480                                      struct walk_control *wc,
6481                                      struct btrfs_path *path)
6482 {
6483         u64 bytenr;
6484         u64 generation;
6485         u64 refs;
6486         u64 flags;
6487         u32 nritems;
6488         u32 blocksize;
6489         struct btrfs_key key;
6490         struct extent_buffer *eb;
6491         int ret;
6492         int slot;
6493         int nread = 0;
6494
6495         if (path->slots[wc->level] < wc->reada_slot) {
6496                 wc->reada_count = wc->reada_count * 2 / 3;
6497                 wc->reada_count = max(wc->reada_count, 2);
6498         } else {
6499                 wc->reada_count = wc->reada_count * 3 / 2;
6500                 wc->reada_count = min_t(int, wc->reada_count,
6501                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6502         }
6503
6504         eb = path->nodes[wc->level];
6505         nritems = btrfs_header_nritems(eb);
6506         blocksize = btrfs_level_size(root, wc->level - 1);
6507
6508         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6509                 if (nread >= wc->reada_count)
6510                         break;
6511
6512                 cond_resched();
6513                 bytenr = btrfs_node_blockptr(eb, slot);
6514                 generation = btrfs_node_ptr_generation(eb, slot);
6515
6516                 if (slot == path->slots[wc->level])
6517                         goto reada;
6518
6519                 if (wc->stage == UPDATE_BACKREF &&
6520                     generation <= root->root_key.offset)
6521                         continue;
6522
6523                 /* We don't lock the tree block, it's OK to be racy here */
6524                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6525                                                &refs, &flags);
6526                 /* We don't care about errors in readahead. */
6527                 if (ret < 0)
6528                         continue;
6529                 BUG_ON(refs == 0);
6530
6531                 if (wc->stage == DROP_REFERENCE) {
6532                         if (refs == 1)
6533                                 goto reada;
6534
6535                         if (wc->level == 1 &&
6536                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6537                                 continue;
6538                         if (!wc->update_ref ||
6539                             generation <= root->root_key.offset)
6540                                 continue;
6541                         btrfs_node_key_to_cpu(eb, &key, slot);
6542                         ret = btrfs_comp_cpu_keys(&key,
6543                                                   &wc->update_progress);
6544                         if (ret < 0)
6545                                 continue;
6546                 } else {
6547                         if (wc->level == 1 &&
6548                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6549                                 continue;
6550                 }
6551 reada:
6552                 ret = readahead_tree_block(root, bytenr, blocksize,
6553                                            generation);
6554                 if (ret)
6555                         break;
6556                 nread++;
6557         }
6558         wc->reada_slot = slot;
6559 }
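
/*
 * Hypothetical helper (not part of btrfs): the multiplicative window
 * adaptation used above, pulled out for illustration.  Starting from a
 * window of 32, moving backward shrinks it 32 -> 21 -> 14 (never below
 * 2), while moving forward grows it 32 -> 48 -> 72, capped at the
 * node's pointer capacity.
 */
static inline int reada_window_example(int count, bool moved_back, int cap)
{
        if (moved_back)
                return max(count * 2 / 3, 2);   /* shrink, keep a floor */
        return min(count * 3 / 2, cap);         /* grow, respect the cap */
}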
6560
6561 /*
6562  * helper to process a tree block while walking down the tree.
6563  *
6564  * when wc->stage == UPDATE_BACKREF, this function updates
6565  * back refs for pointers in the block.
6566  *
6567  * NOTE: return value 1 means we should stop walking down.
6568  */
6569 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6570                                    struct btrfs_root *root,
6571                                    struct btrfs_path *path,
6572                                    struct walk_control *wc, int lookup_info)
6573 {
6574         int level = wc->level;
6575         struct extent_buffer *eb = path->nodes[level];
6576         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6577         int ret;
6578
6579         if (wc->stage == UPDATE_BACKREF &&
6580             btrfs_header_owner(eb) != root->root_key.objectid)
6581                 return 1;
6582
6583         /*
6584          * when the reference count of a tree block is 1, it won't increase
6585          * again. once the full backref flag is set, we never clear it.
6586          */
6587         if (lookup_info &&
6588             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6589              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6590                 BUG_ON(!path->locks[level]);
6591                 ret = btrfs_lookup_extent_info(trans, root,
6592                                                eb->start, eb->len,
6593                                                &wc->refs[level],
6594                                                &wc->flags[level]);
6595                 BUG_ON(ret == -ENOMEM);
6596                 if (ret)
6597                         return ret;
6598                 BUG_ON(wc->refs[level] == 0);
6599         }
6600
6601         if (wc->stage == DROP_REFERENCE) {
6602                 if (wc->refs[level] > 1)
6603                         return 1;
6604
6605                 if (path->locks[level] && !wc->keep_locks) {
6606                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6607                         path->locks[level] = 0;
6608                 }
6609                 return 0;
6610         }
6611
6612         /* wc->stage == UPDATE_BACKREF */
6613         if (!(wc->flags[level] & flag)) {
6614                 BUG_ON(!path->locks[level]);
6615                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6616                 BUG_ON(ret); /* -ENOMEM */
6617                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6618                 BUG_ON(ret); /* -ENOMEM */
6619                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6620                                                   eb->len, flag, 0);
6621                 BUG_ON(ret); /* -ENOMEM */
6622                 wc->flags[level] |= flag;
6623         }
6624
6625         /*
6626          * the block is shared by multiple trees, so it's not good to
6627          * keep the tree lock
6628          */
6629         if (path->locks[level] && level > 0) {
6630                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6631                 path->locks[level] = 0;
6632         }
6633         return 0;
6634 }
6635
6636 /*
6637  * helper to process a tree block pointer.
6638  *
6639  * when wc->stage == DROP_REFERENCE, this function checks the
6640  * reference count of the block pointed to. if the block
6641  * is shared and we need to update back refs for the subtree
6642  * rooted at the block, this function changes wc->stage to
6643  * UPDATE_BACKREF. if the block is shared and there is no
6644  * need to update back refs, this function drops the reference
6645  * to the block.
6646  *
6647  * NOTE: return value 1 means we should stop walking down.
6648  */
6649 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6650                                  struct btrfs_root *root,
6651                                  struct btrfs_path *path,
6652                                  struct walk_control *wc, int *lookup_info)
6653 {
6654         u64 bytenr;
6655         u64 generation;
6656         u64 parent;
6657         u32 blocksize;
6658         struct btrfs_key key;
6659         struct extent_buffer *next;
6660         int level = wc->level;
6661         int reada = 0;
6662         int ret = 0;
6663
6664         generation = btrfs_node_ptr_generation(path->nodes[level],
6665                                                path->slots[level]);
6666         /*
6667          * if the lower level block was created before the snapshot
6668          * was created, we know there is no need to update back refs
6669          * for the subtree
6670          */
6671         if (wc->stage == UPDATE_BACKREF &&
6672             generation <= root->root_key.offset) {
6673                 *lookup_info = 1;
6674                 return 1;
6675         }
6676
6677         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6678         blocksize = btrfs_level_size(root, level - 1);
6679
6680         next = btrfs_find_tree_block(root, bytenr, blocksize);
6681         if (!next) {
6682                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6683                 if (!next)
6684                         return -ENOMEM;
6685                 reada = 1;
6686         }
6687         btrfs_tree_lock(next);
6688         btrfs_set_lock_blocking(next);
6689
6690         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6691                                        &wc->refs[level - 1],
6692                                        &wc->flags[level - 1]);
6693         if (ret < 0) {
6694                 btrfs_tree_unlock(next);
6695                 return ret;
6696         }
6697
6698         BUG_ON(wc->refs[level - 1] == 0);
6699         *lookup_info = 0;
6700
6701         if (wc->stage == DROP_REFERENCE) {
6702                 if (wc->refs[level - 1] > 1) {
6703                         if (level == 1 &&
6704                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6705                                 goto skip;
6706
6707                         if (!wc->update_ref ||
6708                             generation <= root->root_key.offset)
6709                                 goto skip;
6710
6711                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6712                                               path->slots[level]);
6713                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6714                         if (ret < 0)
6715                                 goto skip;
6716
6717                         wc->stage = UPDATE_BACKREF;
6718                         wc->shared_level = level - 1;
6719                 }
6720         } else {
6721                 if (level == 1 &&
6722                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6723                         goto skip;
6724         }
6725
6726         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6727                 btrfs_tree_unlock(next);
6728                 free_extent_buffer(next);
6729                 next = NULL;
6730                 *lookup_info = 1;
6731         }
6732
6733         if (!next) {
6734                 if (reada && level == 1)
6735                         reada_walk_down(trans, root, wc, path);
6736                 next = read_tree_block(root, bytenr, blocksize, generation);
6737                 if (!next)
6738                         return -EIO;
6739                 btrfs_tree_lock(next);
6740                 btrfs_set_lock_blocking(next);
6741         }
6742
6743         level--;
6744         BUG_ON(level != btrfs_header_level(next));
6745         path->nodes[level] = next;
6746         path->slots[level] = 0;
6747         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6748         wc->level = level;
6749         if (wc->level == 1)
6750                 wc->reada_slot = 0;
6751         return 0;
6752 skip:
6753         wc->refs[level - 1] = 0;
6754         wc->flags[level - 1] = 0;
6755         if (wc->stage == DROP_REFERENCE) {
6756                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6757                         parent = path->nodes[level]->start;
6758                 } else {
6759                         BUG_ON(root->root_key.objectid !=
6760                                btrfs_header_owner(path->nodes[level]));
6761                         parent = 0;
6762                 }
6763
6764                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6765                                 root->root_key.objectid, level - 1, 0, 0);
6766                 BUG_ON(ret); /* -ENOMEM */
6767         }
6768         btrfs_tree_unlock(next);
6769         free_extent_buffer(next);
6770         *lookup_info = 1;
6771         return 1;
6772 }
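
/*
 * Editorial note (an assumption about how snapshot roots are keyed):
 * for a snapshot, root->root_key.offset is the transaction id in which
 * the snapshot was created, so the two "generation <=
 * root->root_key.offset" checks above skip whole subtrees whose blocks
 * predate the snapshot and therefore cannot need backref updates.
 */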
6773
6774 /*
6775  * helper to process a tree block while walking up the tree.
6776  *
6777  * when wc->stage == DROP_REFERENCE, this function drops
6778  * reference count on the block.
6779  *
6780  * when wc->stage == UPDATE_BACKREF, this function changes
6781  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6782  * to UPDATE_BACKREF previously while processing the block.
6783  *
6784  * NOTE: return value 1 means we should stop walking up.
6785  */
6786 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6787                                  struct btrfs_root *root,
6788                                  struct btrfs_path *path,
6789                                  struct walk_control *wc)
6790 {
6791         int ret;
6792         int level = wc->level;
6793         struct extent_buffer *eb = path->nodes[level];
6794         u64 parent = 0;
6795
6796         if (wc->stage == UPDATE_BACKREF) {
6797                 BUG_ON(wc->shared_level < level);
6798                 if (level < wc->shared_level)
6799                         goto out;
6800
6801                 ret = find_next_key(path, level + 1, &wc->update_progress);
6802                 if (ret > 0)
6803                         wc->update_ref = 0;
6804
6805                 wc->stage = DROP_REFERENCE;
6806                 wc->shared_level = -1;
6807                 path->slots[level] = 0;
6808
6809                 /*
6810                  * check reference count again if the block isn't locked.
6811                  * we should start walking down the tree again if reference
6812                  * count is one.
6813                  */
6814                 if (!path->locks[level]) {
6815                         BUG_ON(level == 0);
6816                         btrfs_tree_lock(eb);
6817                         btrfs_set_lock_blocking(eb);
6818                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6819
6820                         ret = btrfs_lookup_extent_info(trans, root,
6821                                                        eb->start, eb->len,
6822                                                        &wc->refs[level],
6823                                                        &wc->flags[level]);
6824                         if (ret < 0) {
6825                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6826                                 path->locks[level] = 0;
6827                                 return ret;
6828                         }
6829                         BUG_ON(wc->refs[level] == 0);
6830                         if (wc->refs[level] == 1) {
6831                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6832                                 path->locks[level] = 0;
6833                                 return 1;
6834                         }
6835                 }
6836         }
6837
6838         /* wc->stage == DROP_REFERENCE */
6839         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6840
6841         if (wc->refs[level] == 1) {
6842                 if (level == 0) {
6843                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6844                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6845                                                     wc->for_reloc);
6846                         else
6847                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6848                                                     wc->for_reloc);
6849                         BUG_ON(ret); /* -ENOMEM */
6850                 }
6851                 /* make the block-locked assertion in clean_tree_block happy */
6852                 if (!path->locks[level] &&
6853                     btrfs_header_generation(eb) == trans->transid) {
6854                         btrfs_tree_lock(eb);
6855                         btrfs_set_lock_blocking(eb);
6856                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6857                 }
6858                 clean_tree_block(trans, root, eb);
6859         }
6860
6861         if (eb == root->node) {
6862                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6863                         parent = eb->start;
6864                 else
6865                         BUG_ON(root->root_key.objectid !=
6866                                btrfs_header_owner(eb));
6867         } else {
6868                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6869                         parent = path->nodes[level + 1]->start;
6870                 else
6871                         BUG_ON(root->root_key.objectid !=
6872                                btrfs_header_owner(path->nodes[level + 1]));
6873         }
6874
6875         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6876 out:
6877         wc->refs[level] = 0;
6878         wc->flags[level] = 0;
6879         return 0;
6880 }
6881
6882 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6883                                    struct btrfs_root *root,
6884                                    struct btrfs_path *path,
6885                                    struct walk_control *wc)
6886 {
6887         int level = wc->level;
6888         int lookup_info = 1;
6889         int ret;
6890
6891         while (level >= 0) {
6892                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6893                 if (ret > 0)
6894                         break;
6895
6896                 if (level == 0)
6897                         break;
6898
6899                 if (path->slots[level] >=
6900                     btrfs_header_nritems(path->nodes[level]))
6901                         break;
6902
6903                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6904                 if (ret > 0) {
6905                         path->slots[level]++;
6906                         continue;
6907                 } else if (ret < 0)
6908                         return ret;
6909                 level = wc->level;
6910         }
6911         return 0;
6912 }
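
/*
 * Editorial note on the walker protocol: a return of 1 from
 * walk_down_proc() stops the descent at the current level, while 1
 * from do_walk_down() means "this child is fully handled (shared or
 * freed), advance to the next slot".  walk_up_tree() below then climbs
 * until it finds a node with unvisited slots or passes max_level.
 */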
6913
6914 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6915                                  struct btrfs_root *root,
6916                                  struct btrfs_path *path,
6917                                  struct walk_control *wc, int max_level)
6918 {
6919         int level = wc->level;
6920         int ret;
6921
6922         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6923         while (level < max_level && path->nodes[level]) {
6924                 wc->level = level;
6925                 if (path->slots[level] + 1 <
6926                     btrfs_header_nritems(path->nodes[level])) {
6927                         path->slots[level]++;
6928                         return 0;
6929                 } else {
6930                         ret = walk_up_proc(trans, root, path, wc);
6931                         if (ret > 0)
6932                                 return 0;
6933
6934                         if (path->locks[level]) {
6935                                 btrfs_tree_unlock_rw(path->nodes[level],
6936                                                      path->locks[level]);
6937                                 path->locks[level] = 0;
6938                         }
6939                         free_extent_buffer(path->nodes[level]);
6940                         path->nodes[level] = NULL;
6941                         level++;
6942                 }
6943         }
6944         return 1;
6945 }
6946
6947 /*
6948  * drop a subvolume tree.
6949  *
6950  * this function traverses the tree, freeing any blocks that are only
6951  * referenced by the tree.
6952  *
6953  * when a shared tree block is found, this function decreases its
6954  * reference count by one. if update_ref is true, this function
6955  * also makes sure backrefs for the shared block and all lower level
6956  * blocks are properly updated.
6957  */
6958 int btrfs_drop_snapshot(struct btrfs_root *root,
6959                          struct btrfs_block_rsv *block_rsv, int update_ref,
6960                          int for_reloc)
6961 {
6962         struct btrfs_path *path;
6963         struct btrfs_trans_handle *trans;
6964         struct btrfs_root *tree_root = root->fs_info->tree_root;
6965         struct btrfs_root_item *root_item = &root->root_item;
6966         struct walk_control *wc;
6967         struct btrfs_key key;
6968         int err = 0;
6969         int ret;
6970         int level;
6971
6972         path = btrfs_alloc_path();
6973         if (!path) {
6974                 err = -ENOMEM;
6975                 goto out;
6976         }
6977
6978         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6979         if (!wc) {
6980                 btrfs_free_path(path);
6981                 err = -ENOMEM;
6982                 goto out;
6983         }
6984
6985         trans = btrfs_start_transaction(tree_root, 0);
6986         if (IS_ERR(trans)) {
6987                 err = PTR_ERR(trans);
6988                 goto out_free;
6989         }
6990
6991         if (block_rsv)
6992                 trans->block_rsv = block_rsv;
6993
6994         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6995                 level = btrfs_header_level(root->node);
6996                 path->nodes[level] = btrfs_lock_root_node(root);
6997                 btrfs_set_lock_blocking(path->nodes[level]);
6998                 path->slots[level] = 0;
6999                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7000                 memset(&wc->update_progress, 0,
7001                        sizeof(wc->update_progress));
7002         } else {
7003                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7004                 memcpy(&wc->update_progress, &key,
7005                        sizeof(wc->update_progress));
7006
7007                 level = root_item->drop_level;
7008                 BUG_ON(level == 0);
7009                 path->lowest_level = level;
7010                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7011                 path->lowest_level = 0;
7012                 if (ret < 0) {
7013                         err = ret;
7014                         goto out_end_trans;
7015                 }
7016                 WARN_ON(ret > 0);
7017
7018                 /*
7019          * unlock our path; this is safe because only this
7020          * function is allowed to delete this snapshot
7021                  */
7022                 btrfs_unlock_up_safe(path, 0);
7023
7024                 level = btrfs_header_level(root->node);
7025                 while (1) {
7026                         btrfs_tree_lock(path->nodes[level]);
7027                         btrfs_set_lock_blocking(path->nodes[level]);
7028
7029                         ret = btrfs_lookup_extent_info(trans, root,
7030                                                 path->nodes[level]->start,
7031                                                 path->nodes[level]->len,
7032                                                 &wc->refs[level],
7033                                                 &wc->flags[level]);
7034                         if (ret < 0) {
7035                                 err = ret;
7036                                 goto out_end_trans;
7037                         }
7038                         BUG_ON(wc->refs[level] == 0);
7039
7040                         if (level == root_item->drop_level)
7041                                 break;
7042
7043                         btrfs_tree_unlock(path->nodes[level]);
7044                         WARN_ON(wc->refs[level] != 1);
7045                         level--;
7046                 }
7047         }
7048
7049         wc->level = level;
7050         wc->shared_level = -1;
7051         wc->stage = DROP_REFERENCE;
7052         wc->update_ref = update_ref;
7053         wc->keep_locks = 0;
7054         wc->for_reloc = for_reloc;
7055         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7056
7057         while (1) {
7058                 ret = walk_down_tree(trans, root, path, wc);
7059                 if (ret < 0) {
7060                         err = ret;
7061                         break;
7062                 }
7063
7064                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7065                 if (ret < 0) {
7066                         err = ret;
7067                         break;
7068                 }
7069
7070                 if (ret > 0) {
7071                         BUG_ON(wc->stage != DROP_REFERENCE);
7072                         break;
7073                 }
7074
7075                 if (wc->stage == DROP_REFERENCE) {
7076                         level = wc->level;
7077                         btrfs_node_key(path->nodes[level],
7078                                        &root_item->drop_progress,
7079                                        path->slots[level]);
7080                         root_item->drop_level = level;
7081                 }
7082
7083                 BUG_ON(wc->level == 0);
7084                 if (btrfs_should_end_transaction(trans, tree_root)) {
7085                         ret = btrfs_update_root(trans, tree_root,
7086                                                 &root->root_key,
7087                                                 root_item);
7088                         if (ret) {
7089                                 btrfs_abort_transaction(trans, tree_root, ret);
7090                                 err = ret;
7091                                 goto out_end_trans;
7092                         }
7093
7094                         btrfs_end_transaction_throttle(trans, tree_root);
7095                         trans = btrfs_start_transaction(tree_root, 0);
7096                         if (IS_ERR(trans)) {
7097                                 err = PTR_ERR(trans);
7098                                 goto out_free;
7099                         }
7100                         if (block_rsv)
7101                                 trans->block_rsv = block_rsv;
7102                 }
7103         }
7104         btrfs_release_path(path);
7105         if (err)
7106                 goto out_end_trans;
7107
7108         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7109         if (ret) {
7110                 btrfs_abort_transaction(trans, tree_root, ret);
7111                 goto out_end_trans;
7112         }
7113
7114         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7115                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7116                                            NULL, NULL);
7117                 if (ret < 0) {
7118                         btrfs_abort_transaction(trans, tree_root, ret);
7119                         err = ret;
7120                         goto out_end_trans;
7121                 } else if (ret > 0) {
7122                         /* if we fail to delete the orphan item this time
7123                          * around, it'll get picked up the next time.
7124                          *
7125                          * The most common failure here is just -ENOENT.
7126                          */
7127                         btrfs_del_orphan_item(trans, tree_root,
7128                                               root->root_key.objectid);
7129                 }
7130         }
7131
7132         if (root->in_radix) {
7133                 btrfs_free_fs_root(tree_root->fs_info, root);
7134         } else {
7135                 free_extent_buffer(root->node);
7136                 free_extent_buffer(root->commit_root);
7137                 kfree(root);
7138         }
7139 out_end_trans:
7140         btrfs_end_transaction_throttle(trans, tree_root);
7141 out_free:
7142         kfree(wc);
7143         btrfs_free_path(path);
7144 out:
7145         if (err)
7146                 btrfs_std_error(root->fs_info, err);
7147         return err;
7148 }
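
/*
 * Editorial usage sketch (the caller context is an assumption): a dead
 * snapshot that has already been unlinked can be reclaimed with
 *
 *      err = btrfs_drop_snapshot(snap_root, NULL, 0, 0);
 *
 * update_ref == 1 additionally rewrites backrefs for blocks shared with
 * the source subvolume, and for_reloc == 1 is reserved for relocation.
 * drop_progress/drop_level in the root item let the walk resume after a
 * transaction boundary or a crash, as the restart path above shows.
 */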
7149
7150 /*
7151  * drop the subtree rooted at tree block 'node'.
7152  *
7153  * NOTE: this function will unlock and release tree block 'node'.
7154  * it is only used by the relocation code.
7155  */
7156 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7157                         struct btrfs_root *root,
7158                         struct extent_buffer *node,
7159                         struct extent_buffer *parent)
7160 {
7161         struct btrfs_path *path;
7162         struct walk_control *wc;
7163         int level;
7164         int parent_level;
7165         int ret = 0;
7166         int wret;
7167
7168         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7169
7170         path = btrfs_alloc_path();
7171         if (!path)
7172                 return -ENOMEM;
7173
7174         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7175         if (!wc) {
7176                 btrfs_free_path(path);
7177                 return -ENOMEM;
7178         }
7179
7180         btrfs_assert_tree_locked(parent);
7181         parent_level = btrfs_header_level(parent);
7182         extent_buffer_get(parent);
7183         path->nodes[parent_level] = parent;
7184         path->slots[parent_level] = btrfs_header_nritems(parent);
7185
7186         btrfs_assert_tree_locked(node);
7187         level = btrfs_header_level(node);
7188         path->nodes[level] = node;
7189         path->slots[level] = 0;
7190         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7191
7192         wc->refs[parent_level] = 1;
7193         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7194         wc->level = level;
7195         wc->shared_level = -1;
7196         wc->stage = DROP_REFERENCE;
7197         wc->update_ref = 0;
7198         wc->keep_locks = 1;
7199         wc->for_reloc = 1;
7200         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7201
7202         while (1) {
7203                 wret = walk_down_tree(trans, root, path, wc);
7204                 if (wret < 0) {
7205                         ret = wret;
7206                         break;
7207                 }
7208
7209                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7210                 if (wret < 0)
7211                         ret = wret;
7212                 if (wret != 0)
7213                         break;
7214         }
7215
7216         kfree(wc);
7217         btrfs_free_path(path);
7218         return ret;
7219 }
7220
7221 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7222 {
7223         u64 num_devices;
7224         u64 stripped;
7225
7226         /*
7227          * if restripe for this chunk_type is on, pick the target profile
7228          * and return; otherwise do the usual balance
7229          */
7230         stripped = get_restripe_target(root->fs_info, flags);
7231         if (stripped)
7232                 return extended_to_chunk(stripped);
7233
7234         /*
7235          * we add in the count of missing devices because we want
7236          * to make sure that any RAID levels on a degraded FS
7237          * continue to be honored.
7238          */
7239         num_devices = root->fs_info->fs_devices->rw_devices +
7240                 root->fs_info->fs_devices->missing_devices;
7241
7242         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7243                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7244
7245         if (num_devices == 1) {
7246                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7247                 stripped = flags & ~stripped;
7248
7249                 /* turn raid0 into single device chunks */
7250                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7251                         return stripped;
7252
7253                 /* turn mirroring into duplication */
7254                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7255                              BTRFS_BLOCK_GROUP_RAID10))
7256                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7257         } else {
7258                 /* they already had raid on here, just return */
7259                 if (flags & stripped)
7260                         return flags;
7261
7262                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7263                 stripped = flags & ~stripped;
7264
7265                 /* switch duplicated blocks with raid1 */
7266                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7267                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7268
7269                 /* this is drive concat, leave it alone */
7270         }
7271
7272         return flags;
7273 }
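
/*
 * Editorial worked example for the conversion above: with one rw device
 * and one missing device, num_devices == 2, so an existing RAID1 chunk
 * hits the "already had raid on here" case and keeps its profile.  Only
 * when num_devices == 1 are RAID1/RAID10 chunks rewritten as DUP and
 * RAID0 chunks as single-device chunks.
 */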
7274
7275 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7276 {
7277         struct btrfs_space_info *sinfo = cache->space_info;
7278         u64 num_bytes;
7279         u64 min_allocable_bytes;
7280         int ret = -ENOSPC;
7281
7282
7283         /*
7284          * We need to keep some metadata and system metadata space free
7285          * for allocating chunks in some corner cases, unless we are
7286          * forced to set the group readonly.
7287          */
7288         if ((sinfo->flags &
7289              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7290             !force)
7291                 min_allocable_bytes = 1 * 1024 * 1024;
7292         else
7293                 min_allocable_bytes = 0;
7294
7295         spin_lock(&sinfo->lock);
7296         spin_lock(&cache->lock);
7297
7298         if (cache->ro) {
7299                 ret = 0;
7300                 goto out;
7301         }
7302
7303         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7304                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7305
7306         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7307             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7308             min_allocable_bytes <= sinfo->total_bytes) {
7309                 sinfo->bytes_readonly += num_bytes;
7310                 cache->ro = 1;
7311                 ret = 0;
7312         }
7313 out:
7314         spin_unlock(&cache->lock);
7315         spin_unlock(&sinfo->lock);
7316         return ret;
7317 }
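
/*
 * Editorial worked example for the check above (made-up numbers): a
 * 1024MiB block group with 100MiB used, 50MiB reserved, nothing pinned
 * and 4MiB of super stripes has num_bytes = 1024 - 50 - 0 - 4 - 100 =
 * 870MiB.  The group only goes readonly if the space_info can still
 * account those 870MiB (plus the 1MiB floor for metadata/system groups
 * when !force) as bytes_readonly without exceeding total_bytes.
 */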
7318
7319 int btrfs_set_block_group_ro(struct btrfs_root *root,
7320                              struct btrfs_block_group_cache *cache)
7322 {
7323         struct btrfs_trans_handle *trans;
7324         u64 alloc_flags;
7325         int ret;
7326
7327         BUG_ON(cache->ro);
7328
7329         trans = btrfs_join_transaction(root);
7330         if (IS_ERR(trans))
7331                 return PTR_ERR(trans);
7332
7333         alloc_flags = update_block_group_flags(root, cache->flags);
7334         if (alloc_flags != cache->flags) {
7335                 ret = do_chunk_alloc(trans, root, alloc_flags,
7336                                      CHUNK_ALLOC_FORCE);
7337                 if (ret < 0)
7338                         goto out;
7339         }
7340
7341         ret = set_block_group_ro(cache, 0);
7342         if (!ret)
7343                 goto out;
7344         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7345         ret = do_chunk_alloc(trans, root, alloc_flags,
7346                              CHUNK_ALLOC_FORCE);
7347         if (ret < 0)
7348                 goto out;
7349         ret = set_block_group_ro(cache, 0);
7350 out:
7351         btrfs_end_transaction(trans, root);
7352         return ret;
7353 }
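
/*
 * Editorial note: the second set_block_group_ro() call above is a
 * retry.  If the first attempt fails because the space_info has no
 * room, a chunk with the current allocation profile is force-allocated
 * and the readonly flip is tried once more.
 */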
7354
7355 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7356                             struct btrfs_root *root, u64 type)
7357 {
7358         u64 alloc_flags = get_alloc_profile(root, type);
7359         return do_chunk_alloc(trans, root, alloc_flags,
7360                               CHUNK_ALLOC_FORCE);
7361 }
7362
7363 /*
7364  * helper to account the unused space of all the readonly block groups in
7365  * the list. takes mirrors into account.
7366  */
7367 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7368 {
7369         struct btrfs_block_group_cache *block_group;
7370         u64 free_bytes = 0;
7371         int factor;
7372
7373         list_for_each_entry(block_group, groups_list, list) {
7374                 spin_lock(&block_group->lock);
7375
7376                 if (!block_group->ro) {
7377                         spin_unlock(&block_group->lock);
7378                         continue;
7379                 }
7380
7381                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7382                                           BTRFS_BLOCK_GROUP_RAID10 |
7383                                           BTRFS_BLOCK_GROUP_DUP))
7384                         factor = 2;
7385                 else
7386                         factor = 1;
7387
7388                 free_bytes += (block_group->key.offset -
7389                                btrfs_block_group_used(&block_group->item)) *
7390                                factor;
7391
7392                 spin_unlock(&block_group->lock);
7393         }
7394
7395         return free_bytes;
7396 }
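
/*
 * Editorial worked example for the factor above: a readonly RAID1 block
 * group with key.offset == 1GiB and 256MiB used contributes
 * (1GiB - 256MiB) * 2 = 1.5GiB, because every logical byte in a
 * RAID1/RAID10/DUP group occupies two bytes of raw device space.
 */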
7397
7398 /*
7399  * helper to account the unused space of all the readonly block groups in
7400  * the space_info. takes mirrors into account.
7401  */
7402 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7403 {
7404         int i;
7405         u64 free_bytes = 0;
7406
7407         spin_lock(&sinfo->lock);
7408
7409         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7410                 if (!list_empty(&sinfo->block_groups[i]))
7411                         free_bytes += __btrfs_get_ro_block_group_free_space(
7412                                                 &sinfo->block_groups[i]);
7413
7414         spin_unlock(&sinfo->lock);
7415
7416         return free_bytes;
7417 }
7418
7419 void btrfs_set_block_group_rw(struct btrfs_root *root,
7420                               struct btrfs_block_group_cache *cache)
7421 {
7422         struct btrfs_space_info *sinfo = cache->space_info;
7423         u64 num_bytes;
7424
7425         BUG_ON(!cache->ro);
7426
7427         spin_lock(&sinfo->lock);
7428         spin_lock(&cache->lock);
7429         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7430                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7431         sinfo->bytes_readonly -= num_bytes;
7432         cache->ro = 0;
7433         spin_unlock(&cache->lock);
7434         spin_unlock(&sinfo->lock);
7435 }
7436
7437 /*
7438  * checks to see if it's even possible to relocate this block group.
7439  *
7440  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7441  * it's ok to go ahead and try.
7442  */
7443 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7444 {
7445         struct btrfs_block_group_cache *block_group;
7446         struct btrfs_space_info *space_info;
7447         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7448         struct btrfs_device *device;
7449         u64 min_free;
7450         u64 dev_min = 1;
7451         u64 dev_nr = 0;
7452         u64 target;
7453         int index;
7454         int full = 0;
7455         int ret = 0;
7456
7457         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7458
7459         /* odd, couldn't find the block group, leave it alone */
7460         if (!block_group)
7461                 return -1;
7462
7463         min_free = btrfs_block_group_used(&block_group->item);
7464
7465         /* no bytes used, we're good */
7466         if (!min_free)
7467                 goto out;
7468
7469         space_info = block_group->space_info;
7470         spin_lock(&space_info->lock);
7471
7472         full = space_info->full;
7473
7474         /*
7475          * if this is the last block group we have in this space, we can't
7476          * relocate it unless we're able to allocate a new chunk below.
7477          *
7478          * Otherwise, we need to make sure we have room in the space to handle
7479          * all of the extents from this block group.  If we can, we're good
7480          */
7481         if ((space_info->total_bytes != block_group->key.offset) &&
7482             (space_info->bytes_used + space_info->bytes_reserved +
7483              space_info->bytes_pinned + space_info->bytes_readonly +
7484              min_free < space_info->total_bytes)) {
7485                 spin_unlock(&space_info->lock);
7486                 goto out;
7487         }
7488         spin_unlock(&space_info->lock);
7489
7490         /*
7491          * ok we don't have enough space, but maybe we have free space on our
7492          * devices to allocate new chunks for relocation, so loop through our
7493          * alloc devices and guess if we have enough space.  if this block
7494          * group is going to be restriped, run checks against the target
7495          * profile instead of the current one.
7496          */
7497         ret = -1;
7498
7499         /*
7500          * index:
7501          *      0: raid10
7502          *      1: raid1
7503          *      2: dup
7504          *      3: raid0
7505          *      4: single
7506          */
7507         target = get_restripe_target(root->fs_info, block_group->flags);
7508         if (target) {
7509                 index = __get_raid_index(extended_to_chunk(target));
7510         } else {
7511                 /*
7512                  * this is just a balance, so if we were marked as full
7513                  * we know there is no space for a new chunk
7514                  */
7515                 if (full)
7516                         goto out;
7517
7518                 index = get_block_group_index(block_group);
7519         }
7520
7521         if (index == 0) {
7522                 dev_min = 4;
7523                 /* Divide by 2 */
7524                 min_free >>= 1;
7525         } else if (index == 1) {
7526                 dev_min = 2;
7527         } else if (index == 2) {
7528                 /* Multiply by 2 */
7529                 min_free <<= 1;
7530         } else if (index == 3) {
7531                 dev_min = fs_devices->rw_devices;
7532                 do_div(min_free, dev_min);
7533         }
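
        /*
         * Editorial worked example: relocating a raid10 group (index 0)
         * with 1GiB used needs dev_min == 4 devices offering min_free ==
         * 512MiB each, while a dup group (index 2) needs one device with
         * 2GiB free.
         */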
7534
7535         mutex_lock(&root->fs_info->chunk_mutex);
7536         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7537                 u64 dev_offset;
7538
7539                 /*
7540                  * check to make sure we can actually find a chunk with enough
7541                  * space to fit our block group in.
7542                  */
7543                 if (device->total_bytes > device->bytes_used + min_free &&
7544                     !device->is_tgtdev_for_dev_replace) {
7545                         ret = find_free_dev_extent(device, min_free,
7546                                                    &dev_offset, NULL);
7547                         if (!ret)
7548                                 dev_nr++;
7549
7550                         if (dev_nr >= dev_min)
7551                                 break;
7552
7553                         ret = -1;
7554                 }
7555         }
7556         mutex_unlock(&root->fs_info->chunk_mutex);
7557 out:
7558         btrfs_put_block_group(block_group);
7559         return ret;
7560 }
7561
7562 static int find_first_block_group(struct btrfs_root *root,
7563                 struct btrfs_path *path, struct btrfs_key *key)
7564 {
7565         int ret = 0;
7566         struct btrfs_key found_key;
7567         struct extent_buffer *leaf;
7568         int slot;
7569
7570         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7571         if (ret < 0)
7572                 goto out;
7573
7574         while (1) {
7575                 slot = path->slots[0];
7576                 leaf = path->nodes[0];
7577                 if (slot >= btrfs_header_nritems(leaf)) {
7578                         ret = btrfs_next_leaf(root, path);
7579                         if (ret == 0)
7580                                 continue;
7581                         if (ret < 0)
7582                                 goto out;
7583                         break;
7584                 }
7585                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7586
7587                 if (found_key.objectid >= key->objectid &&
7588                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7589                         ret = 0;
7590                         goto out;
7591                 }
7592                 path->slots[0]++;
7593         }
7594 out:
7595         return ret;
7596 }
7597
7598 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7599 {
7600         struct btrfs_block_group_cache *block_group;
7601         u64 last = 0;
7602
7603         while (1) {
7604                 struct inode *inode;
7605
7606                 block_group = btrfs_lookup_first_block_group(info, last);
7607                 while (block_group) {
7608                         spin_lock(&block_group->lock);
7609                         if (block_group->iref)
7610                                 break;
7611                         spin_unlock(&block_group->lock);
7612                         block_group = next_block_group(info->tree_root,
7613                                                        block_group);
7614                 }
7615                 if (!block_group) {
7616                         if (last == 0)
7617                                 break;
7618                         last = 0;
7619                         continue;
7620                 }
7621
7622                 inode = block_group->inode;
7623                 block_group->iref = 0;
7624                 block_group->inode = NULL;
7625                 spin_unlock(&block_group->lock);
7626                 iput(inode);
7627                 last = block_group->key.objectid + block_group->key.offset;
7628                 btrfs_put_block_group(block_group);
7629         }
7630 }
7631
7632 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7633 {
7634         struct btrfs_block_group_cache *block_group;
7635         struct btrfs_space_info *space_info;
7636         struct btrfs_caching_control *caching_ctl;
7637         struct rb_node *n;
7638
7639         down_write(&info->extent_commit_sem);
7640         while (!list_empty(&info->caching_block_groups)) {
7641                 caching_ctl = list_entry(info->caching_block_groups.next,
7642                                          struct btrfs_caching_control, list);
7643                 list_del(&caching_ctl->list);
7644                 put_caching_control(caching_ctl);
7645         }
7646         up_write(&info->extent_commit_sem);
7647
7648         spin_lock(&info->block_group_cache_lock);
7649         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7650                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7651                                        cache_node);
7652                 rb_erase(&block_group->cache_node,
7653                          &info->block_group_cache_tree);
7654                 spin_unlock(&info->block_group_cache_lock);
7655
7656                 down_write(&block_group->space_info->groups_sem);
7657                 list_del(&block_group->list);
7658                 up_write(&block_group->space_info->groups_sem);
7659
7660                 if (block_group->cached == BTRFS_CACHE_STARTED)
7661                         wait_block_group_cache_done(block_group);
7662
7663                 /*
7664                  * We haven't cached this block group, which means we could
7665                  * possibly have excluded extents on this block group.
7666                  */
7667                 if (block_group->cached == BTRFS_CACHE_NO)
7668                         free_excluded_extents(info->extent_root, block_group);
7669
7670                 btrfs_remove_free_space_cache(block_group);
7671                 btrfs_put_block_group(block_group);
7672
7673                 spin_lock(&info->block_group_cache_lock);
7674         }
7675         spin_unlock(&info->block_group_cache_lock);
7676
7677         /* now that all the block groups are freed, go through and
7678          * free all the space_info structs.  This is only called during
7679          * the final stages of unmount, and so we know nobody is
7680          * using them.  We call synchronize_rcu() once before we start,
7681          * just to be on the safe side.
7682          */
7683         synchronize_rcu();
7684
7685         release_global_block_rsv(info);
7686
7687         while (!list_empty(&info->space_info)) {
7688                 space_info = list_entry(info->space_info.next,
7689                                         struct btrfs_space_info,
7690                                         list);
7691                 if (space_info->bytes_pinned > 0 ||
7692                     space_info->bytes_reserved > 0 ||
7693                     space_info->bytes_may_use > 0) {
7694                         WARN_ON(1);
7695                         dump_space_info(space_info, 0, 0);
7696                 }
7697                 list_del(&space_info->list);
7698                 kfree(space_info);
7699         }
7700         return 0;
7701 }
7702
7703 static void __link_block_group(struct btrfs_space_info *space_info,
7704                                struct btrfs_block_group_cache *cache)
7705 {
7706         int index = get_block_group_index(cache);
7707
7708         down_write(&space_info->groups_sem);
7709         list_add_tail(&cache->list, &space_info->block_groups[index]);
7710         up_write(&space_info->groups_sem);
7711 }
7712
7713 int btrfs_read_block_groups(struct btrfs_root *root)
7714 {
7715         struct btrfs_path *path;
7716         int ret;
7717         struct btrfs_block_group_cache *cache;
7718         struct btrfs_fs_info *info = root->fs_info;
7719         struct btrfs_space_info *space_info;
7720         struct btrfs_key key;
7721         struct btrfs_key found_key;
7722         struct extent_buffer *leaf;
7723         int need_clear = 0;
7724         u64 cache_gen;
7725
7726         root = info->extent_root;
7727         key.objectid = 0;
7728         key.offset = 0;
7729         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7730         path = btrfs_alloc_path();
7731         if (!path)
7732                 return -ENOMEM;
7733         path->reada = 1;
7734
7735         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7736         if (btrfs_test_opt(root, SPACE_CACHE) &&
7737             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7738                 need_clear = 1;
7739         if (btrfs_test_opt(root, CLEAR_CACHE))
7740                 need_clear = 1;
7741
7742         while (1) {
7743                 ret = find_first_block_group(root, path, &key);
7744                 if (ret > 0)
7745                         break;
7746                 if (ret != 0)
7747                         goto error;
7748                 leaf = path->nodes[0];
7749                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7750                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7751                 if (!cache) {
7752                         ret = -ENOMEM;
7753                         goto error;
7754                 }
7755                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7756                                                 GFP_NOFS);
7757                 if (!cache->free_space_ctl) {
7758                         kfree(cache);
7759                         ret = -ENOMEM;
7760                         goto error;
7761                 }
7762
7763                 atomic_set(&cache->count, 1);
7764                 spin_lock_init(&cache->lock);
7765                 cache->fs_info = info;
7766                 INIT_LIST_HEAD(&cache->list);
7767                 INIT_LIST_HEAD(&cache->cluster_list);
7768
7769                 if (need_clear) {
7770                         /*
7771                          * When we mount with an old space cache, we need to
7772                          * set BTRFS_DC_CLEAR and set the dirty flag.
7773                          *
7774                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7775                          *    truncate the old free space cache inode and
7776                          *    setup a new one.
7777                          * b) Setting 'dirty flag' makes sure that we flush
7778                          *    the new space cache info onto disk.
7779                          */
7780                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7781                         if (btrfs_test_opt(root, SPACE_CACHE))
7782                                 cache->dirty = 1;
7783                 }
7784
7785                 read_extent_buffer(leaf, &cache->item,
7786                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7787                                    sizeof(cache->item));
7788                 memcpy(&cache->key, &found_key, sizeof(found_key));
7789
7790                 key.objectid = found_key.objectid + found_key.offset;
7791                 btrfs_release_path(path);
7792                 cache->flags = btrfs_block_group_flags(&cache->item);
7793                 cache->sectorsize = root->sectorsize;
7794
7795                 btrfs_init_free_space_ctl(cache);
7796
7797                 /*
7798                  * We need to exclude the super stripes now so that the space
7799                  * info has super bytes accounted for, otherwise we'll think
7800                  * we have more space than we actually do.
7801                  */
7802                 exclude_super_stripes(root, cache);
7803
7804                 /*
7805                  * check for two cases, either we are full, and therefore
7806                  * don't need to bother with the caching work since we won't
7807                  * find any space, or we are empty, and we can just add all
7808          * the space in and be done with it.  This saves us _a lot_ of
7809                  * time, particularly in the full case.
7810                  */
7811                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7812                         cache->last_byte_to_unpin = (u64)-1;
7813                         cache->cached = BTRFS_CACHE_FINISHED;
7814                         free_excluded_extents(root, cache);
7815                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7816                         cache->last_byte_to_unpin = (u64)-1;
7817                         cache->cached = BTRFS_CACHE_FINISHED;
7818                         add_new_free_space(cache, root->fs_info,
7819                                            found_key.objectid,
7820                                            found_key.objectid +
7821                                            found_key.offset);
7822                         free_excluded_extents(root, cache);
7823                 }
7824
7825                 ret = update_space_info(info, cache->flags, found_key.offset,
7826                                         btrfs_block_group_used(&cache->item),
7827                                         &space_info);
7828                 BUG_ON(ret); /* -ENOMEM */
7829                 cache->space_info = space_info;
7830                 spin_lock(&cache->space_info->lock);
7831                 cache->space_info->bytes_readonly += cache->bytes_super;
7832                 spin_unlock(&cache->space_info->lock);
7833
7834                 __link_block_group(space_info, cache);
7835
7836                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7837                 BUG_ON(ret); /* Logic error */
7838
7839                 set_avail_alloc_bits(root->fs_info, cache->flags);
7840                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7841                         set_block_group_ro(cache, 1);
7842         }
7843
7844         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7845                 if (!(get_alloc_profile(root, space_info->flags) &
7846                       (BTRFS_BLOCK_GROUP_RAID10 |
7847                        BTRFS_BLOCK_GROUP_RAID1 |
7848                        BTRFS_BLOCK_GROUP_DUP)))
7849                         continue;
7850                 /*
7851                  * avoid allocating from un-mirrored block groups (raid0 and
7852                  * single) if there are mirrored block groups.
7853                  */
7854                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7855                         set_block_group_ro(cache, 1);
7856                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7857                         set_block_group_ro(cache, 1);
7858         }
7859
7860         init_global_block_rsv(info);
7861         ret = 0;
7862 error:
7863         btrfs_free_path(path);
7864         return ret;
7865 }
7866
7867 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
7868                                        struct btrfs_root *root)
7869 {
7870         struct btrfs_block_group_cache *block_group, *tmp;
7871         struct btrfs_root *extent_root = root->fs_info->extent_root;
7872         struct btrfs_block_group_item item;
7873         struct btrfs_key key;
7874         int ret = 0;
7875
7876         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
7877                                  new_bg_list) {
7878                 list_del_init(&block_group->new_bg_list);
7879
7880                 if (ret)
7881                         continue;
7882
7883                 spin_lock(&block_group->lock);
7884                 memcpy(&item, &block_group->item, sizeof(item));
7885                 memcpy(&key, &block_group->key, sizeof(key));
7886                 spin_unlock(&block_group->lock);
7887
7888                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
7889                                         sizeof(item));
7890                 if (ret)
7891                         btrfs_abort_transaction(trans, extent_root, ret);
7892         }
7893 }
7894
7895 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7896                            struct btrfs_root *root, u64 bytes_used,
7897                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7898                            u64 size)
7899 {
7900         int ret;
7901         struct btrfs_root *extent_root;
7902         struct btrfs_block_group_cache *cache;
7903
7904         extent_root = root->fs_info->extent_root;
7905
7906         root->fs_info->last_trans_log_full_commit = trans->transid;
7907
7908         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7909         if (!cache)
7910                 return -ENOMEM;
7911         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7912                                         GFP_NOFS);
7913         if (!cache->free_space_ctl) {
7914                 kfree(cache);
7915                 return -ENOMEM;
7916         }
7917
7918         cache->key.objectid = chunk_offset;
7919         cache->key.offset = size;
7920         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7921         cache->sectorsize = root->sectorsize;
7922         cache->fs_info = root->fs_info;
7923
7924         atomic_set(&cache->count, 1);
7925         spin_lock_init(&cache->lock);
7926         INIT_LIST_HEAD(&cache->list);
7927         INIT_LIST_HEAD(&cache->cluster_list);
7928         INIT_LIST_HEAD(&cache->new_bg_list);
7929
7930         btrfs_init_free_space_ctl(cache);
7931
7932         btrfs_set_block_group_used(&cache->item, bytes_used);
7933         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7934         cache->flags = type;
7935         btrfs_set_block_group_flags(&cache->item, type);
7936
7937         cache->last_byte_to_unpin = (u64)-1;
7938         cache->cached = BTRFS_CACHE_FINISHED;
7939         exclude_super_stripes(root, cache);
7940
7941         add_new_free_space(cache, root->fs_info, chunk_offset,
7942                            chunk_offset + size);
7943
7944         free_excluded_extents(root, cache);
7945
7946         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7947                                 &cache->space_info);
7948         BUG_ON(ret); /* -ENOMEM */
7949         update_global_block_rsv(root->fs_info);
7950
7951         spin_lock(&cache->space_info->lock);
7952         cache->space_info->bytes_readonly += cache->bytes_super;
7953         spin_unlock(&cache->space_info->lock);
7954
7955         __link_block_group(cache->space_info, cache);
7956
7957         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7958         BUG_ON(ret); /* Logic error */
7959
7960         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
7961
7962         set_avail_alloc_bits(extent_root->fs_info, type);
7963
7964         return 0;
7965 }
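
/*
 * Editorial note: this is the creation-time counterpart of
 * btrfs_read_block_groups().  The new group is only queued on
 * trans->new_bgs here; its BLOCK_GROUP_ITEM is inserted into the extent
 * tree later by btrfs_create_pending_block_groups() above, deferring
 * the extent tree update out of the chunk allocation path.
 */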
7966
7967 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7968 {
7969         u64 extra_flags = chunk_to_extended(flags) &
7970                                 BTRFS_EXTENDED_PROFILE_MASK;
7971
7972         if (flags & BTRFS_BLOCK_GROUP_DATA)
7973                 fs_info->avail_data_alloc_bits &= ~extra_flags;
7974         if (flags & BTRFS_BLOCK_GROUP_METADATA)
7975                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7976         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7977                 fs_info->avail_system_alloc_bits &= ~extra_flags;
7978 }
7979
7980 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7981                              struct btrfs_root *root, u64 group_start)
7982 {
7983         struct btrfs_path *path;
7984         struct btrfs_block_group_cache *block_group;
7985         struct btrfs_free_cluster *cluster;
7986         struct btrfs_root *tree_root = root->fs_info->tree_root;
7987         struct btrfs_key key;
7988         struct inode *inode;
7989         int ret;
7990         int index;
7991         int factor;
7992
7993         root = root->fs_info->extent_root;
7994
7995         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7996         BUG_ON(!block_group);
7997         BUG_ON(!block_group->ro);
7998
7999         /*
8000          * Free the reserved super bytes from this block group before
8001          * removing it.
8002          */
8003         free_excluded_extents(root, block_group);
8004
8005         memcpy(&key, &block_group->key, sizeof(key));
8006         index = get_block_group_index(block_group);
8007         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8008                                   BTRFS_BLOCK_GROUP_RAID1 |
8009                                   BTRFS_BLOCK_GROUP_RAID10))
8010                 factor = 2;
8011         else
8012                 factor = 1;
8013
8014         /* make sure this block group isn't part of an allocation cluster */
8015         cluster = &root->fs_info->data_alloc_cluster;
8016         spin_lock(&cluster->refill_lock);
8017         btrfs_return_cluster_to_free_space(block_group, cluster);
8018         spin_unlock(&cluster->refill_lock);
8019
8020         /*
8021          * make sure this block group isn't part of a metadata
8022          * allocation cluster
8023          */
8024         cluster = &root->fs_info->meta_alloc_cluster;
8025         spin_lock(&cluster->refill_lock);
8026         btrfs_return_cluster_to_free_space(block_group, cluster);
8027         spin_unlock(&cluster->refill_lock);
8028
8029         path = btrfs_alloc_path();
8030         if (!path) {
8031                 ret = -ENOMEM;
8032                 goto out;
8033         }
8034
8035         inode = lookup_free_space_inode(tree_root, block_group, path);
8036         if (!IS_ERR(inode)) {
8037                 ret = btrfs_orphan_add(trans, inode);
8038                 if (ret) {
8039                         btrfs_add_delayed_iput(inode);
8040                         goto out;
8041                 }
8042                 clear_nlink(inode);
8043                 /* One for the block groups ref */
8044                 spin_lock(&block_group->lock);
8045                 if (block_group->iref) {
8046                         block_group->iref = 0;
8047                         block_group->inode = NULL;
8048                         spin_unlock(&block_group->lock);
8049                         iput(inode);
8050                 } else {
8051                         spin_unlock(&block_group->lock);
8052                 }
8053                 /* One for our lookup ref */
8054                 btrfs_add_delayed_iput(inode);
8055         }
8056
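        /*
         * Delete the on-disk item for this block group's free space cache
         * from the tree root; it is keyed by BTRFS_FREE_SPACE_OBJECTID with
         * key type 0 and the group's start offset.
         */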
8057         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8058         key.offset = block_group->key.objectid;
8059         key.type = 0;
8060
8061         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8062         if (ret < 0)
8063                 goto out;
8064         if (ret > 0)
8065                 btrfs_release_path(path);
8066         if (ret == 0) {
8067                 ret = btrfs_del_item(trans, tree_root, path);
8068                 if (ret)
8069                         goto out;
8070                 btrfs_release_path(path);
8071         }
8072
8073         spin_lock(&root->fs_info->block_group_cache_lock);
8074         rb_erase(&block_group->cache_node,
8075                  &root->fs_info->block_group_cache_tree);
8076
8077         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8078                 root->fs_info->first_logical_byte = (u64)-1;
8079         spin_unlock(&root->fs_info->block_group_cache_lock);
8080
8081         down_write(&block_group->space_info->groups_sem);
8082         /*
8083          * We must use list_del_init so callers can check whether the
8084          * group is still on the list after taking the semaphore.
8085          */
8086         list_del_init(&block_group->list);
8087         if (list_empty(&block_group->space_info->block_groups[index]))
8088                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8089         up_write(&block_group->space_info->groups_sem);
8090
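        /*
         * If the async caching kthread is still filling in this group's
         * free space, wait for it to finish before tearing the cache down.
         */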
8091         if (block_group->cached == BTRFS_CACHE_STARTED)
8092                 wait_block_group_cache_done(block_group);
8093
8094         btrfs_remove_free_space_cache(block_group);
8095
8096         spin_lock(&block_group->space_info->lock);
8097         block_group->space_info->total_bytes -= block_group->key.offset;
8098         block_group->space_info->bytes_readonly -= block_group->key.offset;
8099         block_group->space_info->disk_total -= block_group->key.offset * factor;
8100         spin_unlock(&block_group->space_info->lock);
8101
8102         memcpy(&key, &block_group->key, sizeof(key));
8103
8104         btrfs_clear_space_info_full(root->fs_info);
8105
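        /*
         * Drop both remaining references: the one taken by
         * btrfs_lookup_block_group() above and the one that was held by
         * the block group cache rbtree we erased this group from.
         */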
8106         btrfs_put_block_group(block_group);
8107         btrfs_put_block_group(block_group);
8108
8109         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8110         if (ret > 0)
8111                 ret = -EIO;
8112         if (ret < 0)
8113                 goto out;
8114
8115         ret = btrfs_del_item(trans, root, path);
8116 out:
8117         btrfs_free_path(path);
8118         return ret;
8119 }
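/*
 * A rough sketch of how this is reached (an assumption, modeled on the
 * chunk removal path in volumes.c): btrfs_relocate_chunk() first moves
 * all live extents out, then deletes the chunk and device extent items,
 * and only then calls in here with the now-empty, read-only group:
 *
 *	btrfs_relocate_block_group(extent_root, chunk_offset);
 *	...delete the chunk mapping and device extents...
 *	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 *
 * The BUG_ON(!block_group->ro) above encodes that ordering: the group
 * must already have been marked read-only and emptied by relocation.
 */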
8120
8121 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8122 {
8123         struct btrfs_space_info *space_info;
8124         struct btrfs_super_block *disk_super;
8125         u64 features;
8126         u64 flags;
8127         int mixed = 0;
8128         int ret;
8129
8130         disk_super = fs_info->super_copy;
8131         if (!btrfs_super_root(disk_super))
8132                 return 1;
8133
8134         features = btrfs_super_incompat_flags(disk_super);
8135         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8136                 mixed = 1;
8137
8138         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8139         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8140         if (ret)
8141                 goto out;
8142
8143         if (mixed) {
8144                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8145                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8146         } else {
8147                 flags = BTRFS_BLOCK_GROUP_METADATA;
8148                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8149                 if (ret)
8150                         goto out;
8151
8152                 flags = BTRFS_BLOCK_GROUP_DATA;
8153                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8154         }
8155 out:
8156         return ret;
8157 }
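/*
 * A minimal sketch of the mount-time caller (an assumption, modeled on
 * open_ctree() in disk-io.c): the space_info structures must exist before
 * the block groups are read, and failure here aborts the mount:
 *
 *	ret = btrfs_init_space_info(fs_info);
 *	if (ret) {
 *		printk(KERN_ERR "btrfs: failed to init space info: %d\n", ret);
 *		goto fail_block_groups;
 *	}
 */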
8158
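/*
 * Thin wrappers used by the error/abort cleanup paths in disk-io.c, so
 * that code can reuse the unpin and discard helpers in this file without
 * the static ones being exported directly.
 */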
8159 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8160 {
8161         return unpin_extent_range(root, start, end);
8162 }
8163
8164 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8165                                u64 num_bytes, u64 *actual_bytes)
8166 {
8167         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8168 }
8169
8170 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8171 {
8172         struct btrfs_fs_info *fs_info = root->fs_info;
8173         struct btrfs_block_group_cache *cache = NULL;
8174         u64 group_trimmed;
8175         u64 start;
8176         u64 end;
8177         u64 trimmed = 0;
8178         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8179         int ret = 0;
8180
8181         /*
8182          * Try to trim the whole FS; block groups may start at a non-zero offset.
8183          */
8184         if (range->len == total_bytes)
8185                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8186         else
8187                 cache = btrfs_lookup_block_group(fs_info, range->start);
8188
8189         while (cache) {
8190                 if (cache->key.objectid >= (range->start + range->len)) {
8191                         btrfs_put_block_group(cache);
8192                         break;
8193                 }
8194
8195                 start = max(range->start, cache->key.objectid);
8196                 end = min(range->start + range->len,
8197                                 cache->key.objectid + cache->key.offset);
8198
8199                 if (end - start >= range->minlen) {
8200                         if (!block_group_cache_done(cache)) {
8201                                 ret = cache_block_group(cache, 0);
8202                                 if (!ret)
8203                                         wait_block_group_cache_done(cache);
8204                         }
8205                         ret = btrfs_trim_block_group(cache,
8206                                                      &group_trimmed,
8207                                                      start,
8208                                                      end,
8209                                                      range->minlen);
8210
8211                         trimmed += group_trimmed;
8212                         if (ret) {
8213                                 btrfs_put_block_group(cache);
8214                                 break;
8215                         }
8216                 }
8217
8218                 cache = next_block_group(fs_info->tree_root, cache);
8219         }
8220
8221         range->len = trimmed;
8222         return ret;
8223 }
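/*
 * A minimal userspace sketch of how this entry point is driven via the
 * FITRIM ioctl (the mount path and range values are illustrative
 * assumptions):
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// struct fstrim_range, FITRIM
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// trim the whole filesystem
 *		.minlen = 0,		// no minimum extent length
 *	};
 *	int fd = open("/mnt/btrfs", O_RDONLY);
 *	ioctl(fd, FITRIM, &range);
 *
 * On return, range.len holds the number of bytes actually trimmed,
 * mirroring the range->len = trimmed assignment above.
 */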