/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct btrfs_pending_bios {
	struct bio *head;
	struct bio *tail;
};

struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
	u64 stripe_len;
	/* number of stripe where address falls */
	u64 stripe_nr;
	/* offset of address in stripe */
	u64 stripe_offset;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};

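/*
 * Illustrative use (sketch, not part of the original header): callers ask
 * how much of an IO starting at a logical address fits before the next
 * stripe boundary, e.g.
 *
 *	struct btrfs_io_geometry geom;
 *
 *	if (btrfs_get_io_geometry(fs_info, BTRFS_MAP_WRITE, logical,
 *				  map_length, &geom) == 0)
 *		map_length = geom.len;	// clamp the bio to one stripe
 */
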
/*
 * Use sequence counter to get consistent device stat data on
 * 32bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

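/*
 * Why the seqcount: on 32-bit SMP kernels a u64 load or store is split
 * into two word-sized accesses, so an unsynchronized reader can observe a
 * half-updated ("torn") value. Illustrative sketch (not actual kernel
 * code):
 *
 *	dev->total_bytes = 0x100000000ULL;
 *	// if the old value was 0xFFFFFFFF, a racing reader may see
 *	// 0x1FFFFFFFF or 0 between the two 32-bit stores; the seqcount
 *	// retry loop in BTRFS_DEVICE_GETSET_FUNCS() below makes the
 *	// reader retry instead.
 */
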
#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)

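/*
 * These values are bit numbers into btrfs_device::dev_state and are used
 * with the standard bitops, e.g. (illustrative):
 *
 *	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
 *		// device is known to the FS but not currently present
 */
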
struct btrfs_device {
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
	struct list_head post_commit_list; /* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string *name;

	u64 generation;

	spinlock_t io_lock ____cacheline_aligned;
	int running_pending;
	/* regular prio bios */
	struct btrfs_pending_bios pending_bios;

	/* sync bios */
	struct btrfs_pending_bios pending_sync_bios;

	struct block_device *bdev;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;

	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * size of the device on the current transaction
	 *
	 * This variant is updated when committing the transaction,
	 * and protected by chunk mutex
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	struct btrfs_work work;

	/* readahead state */
	atomic_t reada_in_flight;
	u64 reada_next;
	struct reada_zone *reada_curr_zone;
	struct radix_tree_root reada_zones;
	struct radix_tree_root reada_extents;

	/* disk I/O failure stats. For detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;
};

/*
 * If we read those variables while holding their own lock, we needn't
 * use the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);

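/*
 * Illustrative use of the generated accessors (sketch): callers treat
 * them as plain getters/setters, and the 32-bit tearing protection above
 * is invisible at the call site, e.g.
 *
 *	u64 total = btrfs_device_get_total_bytes(dev);
 *
 *	btrfs_device_set_bytes_used(dev, total / 2);
 */
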
struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	u64 num_devices;
	u64 open_devices;
	u64 rw_devices;
	u64 missing_devices;
	u64 total_rw_bytes;
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	struct block_device *latest_bdev;

	/* all of the devices in the FS, protected by a mutex
	 * so we can safely walk it to write out the supers without
	 * worrying about add/remove by the multi-device code.
	 * Scrubbing super can kick off supers writing by holding
	 * device_list_mutex lock.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex
	 */
	struct list_head alloc_list;

	struct btrfs_fs_devices *seed;
	int seeding;

	int opened;

	/* set when we find or add a device that doesn't have the
	 * nonrot flag set
	 */
	int rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *device_dir_kobj;
	struct completion kobj_unregister;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

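/*
 * Rough worked example (sketch; the exact figures depend on the on-disk
 * struct sizes): assuming BTRFS_SYSTEM_CHUNK_ARRAY_SIZE = 2048, a 17-byte
 * btrfs_disk_key, a 48-byte btrfs_chunk and a 32-byte btrfs_stripe,
 *
 *	BTRFS_MAX_DEVS_SYS_CHUNK = (2048 - 2*17 - 2*48) / 32 + 1 = 60
 *
 * i.e. a system chunk can describe roughly 60 stripes.
 */
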
/*
 * we need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
 * Really, what we need is a btrfs_bio structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet.  We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios.  We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
struct btrfs_io_bio {
	unsigned int mirror_num;
	unsigned int stripe_index;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for entire btrfs_io_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}

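/*
 * Example (illustrative): in an end_io handler the generic bio is
 * converted back to the btrfs wrapper to reach the extra fields, e.g.
 *
 *	static void my_end_io(struct bio *bio)	// hypothetical handler
 *	{
 *		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *		int mirror = io_bio->mirror_num;
 *		...
 *	}
 *
 * This is only valid for bios allocated from the btrfs bioset, where
 * struct bio really is embedded in a struct btrfs_io_bio.
 */
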
static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
{
	if (io_bio->csum != io_bio->csum_inline) {
		kfree(io_bio->csum);
		io_bio->csum = NULL;
	}
}

struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

struct btrfs_bio {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q.  These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
	struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies of the data exist */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if min devs requisite is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes;	/* For mount time dev extent verification */
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

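/*
 * For example (illustrative): allocating a two-stripe mapping sizes the
 * flexible array member accordingly:
 *
 *	struct map_lookup *map = kmalloc(map_lookup_size(2), GFP_NOFS);
 *
 *	if (map)
 *		map->num_stripes = 2;
 */
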
struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		/* fall through */
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len, struct btrfs_io_geometry *io_geom);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid, bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

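/*
 * Illustrative reader-side pairing (sketch, not the actual
 * btrfs_run_dev_stats() body): a reader that wants a consistent snapshot
 * samples the change counter around the reads, e.g.
 *
 *	int ccnt = atomic_read(&dev->dev_stats_ccnt);
 *
 *	smp_rmb();
 *	int writes = btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *	// if dev_stats_ccnt changed meanwhile, the stats were updated
 *	// concurrently and the snapshot may be stale.
 */
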
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

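/*
 * Example (illustrative): the returned index selects a row of
 * btrfs_raid_array, so per-profile attributes can be looked up as
 *
 *	const struct btrfs_raid_attr *attr =
 *		&btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)];
 *	int copies = attr->ncopies;
 */
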
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);

int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);

#endif