/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

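/* Read path overview: every user read is split into sectors of
 * PBLK_EXPOSED_PAGE_SIZE. Each sector is served either from the write buffer
 * (cache hit), from the device, or not at all (the lba was never written).
 * Sectors resolved without touching the device are marked in a read bitmap;
 * only the remaining "holes" generate device I/O (see pblk_submit_read()).
 */
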
/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba,
					pblk_addr_to_cacheline(ppa), bio_iter);
}

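/* A cached ppa is not a device address: pblk flags it as cached and encodes
 * the write buffer entry it refers to (see the pblk_addr_in_cache() and
 * pblk_addr_to_cacheline() helpers in pblk.h). The copy can still fail if the
 * entry is overwritten between the L2P lookup and the copy, in which case
 * callers re-run the lookup and retry.
 */
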
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	sector_t blba = pblk_get_lba(bio);
	int nr_secs = rqd->nr_ppas;
	int advanced_bio = 0;
	int i, j = 0;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba + nr_secs >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lbas out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			/* Fall through to the bio advance below: once the bio
			 * has been advanced for a cache hit, every later
			 * sector must keep advancing it, or cached data is
			 * copied to the wrong page.
			 */
			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			advanced_bio = 1;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

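/* Example: for a 4-sector read where sectors 0 and 2 hit the write buffer,
 * bits 0 and 2 are set in the read bitmap and their data is copied into the
 * bio directly, while rqd->ppa_list[0..1] collects the media addresses of
 * sectors 1 and 3. pblk_submit_read() then issues device I/O for the two
 * holes only and stitches the result back in pblk_fill_partial_read_bio().
 */
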
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(bio);
	if (r_ctx->private) {
		struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
		bio_endio(orig_bio);
		bio_put(orig_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, READ);
	atomic_dec(&pblk->inflight_io);
}

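/* Completion path shared by all reads: it runs as the rqd end_io callback for
 * reads that reach the device, and is called directly when the request is
 * completed in the host (full cache hit, partial read, error paths).
 * rqd->bio holds its own reference, dropped here; r_ctx->private, when set,
 * carries the original user bio that the submitted bio was cloned from.
 */
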
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;
	DECLARE_COMPLETION_ONSTACK(wait);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
	if (!new_bio) {
		pr_err("pblk: could not alloc read bio\n");
		return NVM_IO_ERR;
	}

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
	new_bio->bi_private = &wait;
	new_bio->bi_end_io = pblk_end_bio_sync;

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd->end_io = NULL;

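	/* With a single hole left, the device is addressed through the inline
	 * rqd->ppa_addr instead of the ppa list. Writing it can clobber the
	 * list fields (they overlap in struct nvm_rq), so stash the list
	 * pointer and its DMA handle and restore them after the I/O
	 * completes.
	 */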
	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_read_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: read IO submission failed\n");
		goto err;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: partial read I/O timed out\n");
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;

	bio_endio(bio);
	pblk_end_io_read(rqd);
	return NVM_IO_OK;

err:
	/* Free the pages allocated for the partial read. They belong to
	 * new_bio, not to the original bio; freeing from the latter would
	 * release the caller's pages and leak our own.
	 */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
	bio_put(new_bio);

	/* Complete the original request; the caller fails the user bio */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	pblk_end_io_read(rqd);
	return NVM_IO_ERR;
}

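/* Single-sector counterpart of pblk_read_ppalist_rq(): resolve one lba and
 * either serve it from the write buffer or set up rqd->ppa_addr for device
 * I/O.
 */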
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;
	sector_t lba = pblk_get_lba(bio);

	/* logic error: lba out-of-bounds. Ignore read request */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

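/* Entry point for user reads. Three outcomes are possible: every sector is
 * resolved in the host and the bio completes immediately; no sector is
 * resolved and a clone of the bio goes straight to the device; or the bio is
 * partially filled and pblk_fill_partial_read_bio() reads the holes
 * synchronously.
 */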
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	unsigned int nr_secs = pblk_get_secs(bio);
	struct nvm_rq *rqd;
	DECLARE_BITMAP(read_bitmap, PBLK_MAX_REQ_ADDRS); /* Max 64 ppas */
	unsigned int bio_init_idx;
	int ret = NVM_IO_ERR;

	if (nr_secs > PBLK_MAX_REQ_ADDRS)
		return NVM_IO_ERR;

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err_ratelimited("pblk: not able to alloc rqd\n");
		return NVM_IO_ERR;
	}

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate metadata list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		pblk_end_io_read(rqd);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;
		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			bio_put(bio);	/* drop the submission reference */
			goto fail_meta_free;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			bio_put(int_bio);
			bio_put(bio);	/* drop the submission reference */
			goto fail_meta_free;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_meta_free:
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

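/* GC reads re-validate each lba against the L2P table: only sectors whose
 * current mapping still points to the victim line are read back. Anything
 * updated in the meantime (rewritten to another line or sitting in the write
 * buffer) is skipped; GC picks up the new version through the regular path.
 */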
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      unsigned int nr_secs)
{
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
						pblk_ppa_empty(ppas[i])) {
			lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppas[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
	return valid_secs;
}

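/* Single-sector variant of read_ppalist_rq_gc(); the mapping is sampled
 * under trans_lock and revalidated against the victim line.
 */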
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba)
{
	struct ppa_addr ppa;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Ignore sectors that have been updated since GC chose this line */
	if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
							pblk_ppa_empty(ppa))
		goto out;

	rqd->ppa_addr = ppa;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

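/* Synchronous read of up to PBLK_MAX_REQ_ADDRS sectors on behalf of GC. The
 * caller provides the destination buffer; since the GC path is expected to
 * allocate it with vmalloc (see pblk-gc.c), it is mapped with
 * PBLK_VMALLOC_META below.
 */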
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int ret, data_len;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return NVM_IO_ERR;

	if (nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
								nr_secs);
		if (*secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
	}

	if (!(*secs_to_gc))
		goto out;

	data_len = (*secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%ld)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;
	rqd.nr_ppas = *secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	ret = pblk_submit_read_io(pblk, &rqd);
	if (ret) {
		bio_put(bio);	/* never submitted; drop the mapping bio */
		pr_err("pblk: GC read request failed\n");
		goto err_free_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: GC read I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(*secs_to_gc, &pblk->sync_reads);
	atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_OK;

err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_ERR;
}