drivers/md/bcache/io.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

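/*
 * Free a bbio previously allocated with bch_bbio_alloc(), returning it
 * to the cache set's bio_meta mempool.
 */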
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        mempool_free(b, c->bio_meta);
}

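/*
 * Allocate a bbio (a bio with a struct bkey header) from the cache
 * set's bio_meta mempool, with enough inline bvecs for one bucket.
 */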
struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

        return bio;
}

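/*
 * Submit a bbio whose key has already been filled in: point the bio at
 * the sector and cache device named by the key's first pointer, record
 * the submit time for latency tracking, and submit it under the
 * closure stashed in bi_private.
 */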
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

        b->submit_time_us = local_clock_us();
        closure_bio_submit(c, bio, bio->bi_private);
}

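/*
 * Copy the @ptr'th pointer of @k into the bbio's key, then submit the
 * bio against that pointer's cache device.
 */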
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}

/* IO errors */

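/*
 * Count an IO error against the backing device.  Errors accumulate in
 * dc->io_errors with no decay; once dc->error_limit is reached the
 * cached device is shut down via bch_cached_dev_error().
 */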
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
        char buf[BDEVNAME_SIZE];
        unsigned errors;

        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

        errors = atomic_add_return(1, &dc->io_errors);
        if (errors < dc->error_limit)
                pr_err("%s: IO error on backing device, unrecoverable",
                        bio_devname(bio, buf));
        else
                bch_cached_dev_error(dc);
}

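/*
 * Count an IO error against cache device @ca, with exponential decay of
 * older errors.  Error counts are kept shifted up by IO_ERROR_SHIFT so
 * that the 127/128 rescaling below retains fractional precision; every
 * error_decay IOs the accumulated count is multiplied by 127/128 once,
 * which gives the ~88 * refresh half-life derived in the comment below.
 */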
void bch_count_io_errors(struct cache *ca,
                         blk_status_t error,
                         int is_read,
                         const char *m)
{
        /*
         * The half-life of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */

        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */

                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s%s",
                               bdevname(ca->bdev, buf), m,
                               is_read ? ", recovering." : ".");
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}

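/*
 * Per-IO bookkeeping for a completed bbio: compare its round-trip
 * latency against the congestion threshold for its direction (and
 * adjust c->congested accordingly), then count any error against the
 * cache device the key's first pointer lives on.
 */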
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              blk_status_t error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);
        int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

        unsigned threshold = op_is_write(bio_op(bio))
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();

                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        /* Cheap approximation of us -> ms (/1024, not /1000) */
                        int ms = us / 1024;

                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, is_read, m);
}

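/*
 * Common completion path for bbios: do the error/latency accounting,
 * drop the bio's reference, and put the parent closure that was stored
 * in bi_private at submit time.
 */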
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    blk_status_t error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_put(bio);
        closure_put(cl);
}