lightnvm: pblk: guarantee that backpointer is respected on writer stall
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

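/* Complete the write buffer entries covered by a finished write request:
 * clear flush flags, end the user bios waiting on these entries, free any
 * pages used for padding and advance the write buffer sync pointer.
 */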
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct bio *original_bio;
        struct pblk_rb *rwb = &pblk->rwb;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;
                int pos = c_ctx->sentry + i;
                int flags;

                w_ctx = pblk_rb_w_ctx(rwb, pos);
                flags = READ_ONCE(w_ctx->flags);

                if (flags & PBLK_FLUSH_ENTRY) {
                        flags &= ~PBLK_FLUSH_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_dec(&rwb->inflight_flush_point);
#endif
                }

                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return ret;
}

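/* Complete a request that had to wait for in-order completion */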
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}

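/* Completions must advance the write buffer sync pointer in order. A request
 * that completes ahead of the current sync position is parked on compl_list
 * until the requests before it have completed.
 */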
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
        pblk_up_rq(pblk, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
        struct pblk_line *line;
        struct ppa_addr map_ppa = *ppa;
        u64 paddr;
        int done = 0;

        line = pblk_ppa_to_line(pblk, *ppa);
        spin_lock(&line->lock);

        while (!done) {
                paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

                if (!test_and_set_bit(paddr, line->map_bitmap))
                        line->left_msecs--;

                if (!test_and_set_bit(paddr, line->invalid_bitmap))
                        le32_add_cpu(line->vsc, -1);

                done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
        }

        line->w_err_gc->has_write_err = 1;
        spin_unlock(&line->lock);
}

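/* Prepare failed write buffer entries for resubmission: invalidate entries
 * whose lba has been overwritten in the meantime, mark all entries as
 * submittable again and drop the line references taken when they were mapped.
 */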
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
                                  unsigned int nr_entries)
{
        struct pblk_rb *rb = &pblk->rwb;
        struct pblk_rb_entry *entry;
        struct pblk_line *line;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr ppa_l2p;
        int flags;
        unsigned int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_entries; i++) {
                entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
                w_ctx = &entry->w_ctx;

                /* Check if the lba has been overwritten */
                ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
                if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
                        w_ctx->lba = ADDR_EMPTY;

                /* Mark up the entry as submittable again */
                flags = READ_ONCE(w_ctx->flags);
                flags |= PBLK_WRITTEN_DATA;
                /* Release flags on write context. Protect from writes */
                smp_store_release(&w_ctx->flags, flags);

                /* Decrease the reference count to the line as we will
                 * re-map these entries
                 */
                line = pblk_ppa_to_line(pblk, w_ctx->ppa);
                kref_put(&line->ref, pblk_line_put);
        }
        spin_unlock(&pblk->trans_lock);
}

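/* Queue a copy of the failed completion context so that the write thread
 * picks these entries up again on its next pass.
 */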
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *r_ctx;

        r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
        if (!r_ctx)
                return;

        r_ctx->lun_bitmap = NULL;
        r_ctx->sentry = c_ctx->sentry;
        r_ctx->nr_valid = c_ctx->nr_valid;
        r_ctx->nr_padded = c_ctx->nr_padded;

        spin_lock(&pblk->resubmit_lock);
        list_add_tail(&r_ctx->list, &pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

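/* Recovery work for a failed user data write: log the error, mark the
 * remaining sectors of the failed chunk as mapped and invalid, queue the
 * entries for resubmission and release the request resources.
 */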
static void pblk_submit_rec(struct work_struct *work)
{
        struct pblk_rec_ctx *recovery =
                        container_of(work, struct pblk_rec_ctx, ws_rec);
        struct pblk *pblk = recovery->pblk;
        struct nvm_rq *rqd = recovery->rqd;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        pblk_log_write_err(pblk, rqd);

        pblk_map_remaining(pblk, ppa_list);
        pblk_queue_resubmit(pblk, c_ctx);

        pblk_up_rq(pblk, c_ctx->lun_bitmap);
        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);
        mempool_free(recovery, &pblk->rec_pool);

        atomic_dec(&pblk->inflight_io);
}

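/* Queue recovery work for a failed write; the actual handling runs in
 * process context.
 */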
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_rec_ctx *recovery;

        recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pblk_err(pblk, "could not allocate recovery work\n");
                return;
        }

        recovery->pblk = pblk;
        recovery->rqd = rqd;

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);
}

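/* Completion handler for user data writes */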
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_end_w_fail(pblk, rqd);
                return;
        } else {
                if (trace_pblk_chunk_state_enabled())
                        pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
        }

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}

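/* Completion handler for emeta writes. The line is closed once all of its
 * emeta sectors have been synced.
 */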
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int sync;

        pblk_up_chunk(pblk, ppa_list[0]);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
                line->w_err_gc->has_write_err = 1;
        } else {
                if (trace_pblk_chunk_state_enabled())
                        pblk_check_chunk_state_update(pblk, rqd);
        }

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                                GFP_ATOMIC, pblk->close_wq);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
}

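/* Fill in the common fields of a write request and set up its metadata buffer */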
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->is_seq = 1;
        rqd->private = pblk;
        rqd->end_io = end_io;

        return pblk_alloc_rqd_meta(pblk, rqd);
}

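/* Set up a user data write: allocate the LUN bitmap, initialize the request
 * and map the valid sectors, scheduling an erase if the erase line still has
 * blocks pending.
 */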
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                                        valid, erase_ppa);

        return 0;
}

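/* Calculate how many sectors to submit, with sanity checks on debug builds */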
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}

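/* Write out the next portion of a line's emeta. The line is removed from the
 * emeta list once all of its emeta has been submitted.
 */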
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct ppa_addr *ppa_list;
        struct pblk_g_ctx *m_ctx;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->csecs;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pblk_err(pblk, "failed to map emeta io\n");
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        ppa_list = nvm_rq_to_ppa_list(rqd);
        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        spin_lock(&l_mg->close_lock);
        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0])
                list_del(&meta_line->list);
        spin_unlock(&l_mg->close_lock);

        pblk_down_chunk(pblk, ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        pblk_up_chunk(pblk, ppa_list[0]);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        /* re-add the line to the emeta list (it may have been removed above) */
        list_add(&meta_line->list, &l_mg->emeta_list);
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
                                       struct pblk_line *meta_line,
                                       struct nvm_rq *data_rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
        struct pblk_line *data_line = pblk_line_get_data(pblk);
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int pos_opt;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regard to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in metadata and data I/Os colliding.
         * In this case, modify the distance so that it is no longer optimal,
         * but keep moving it in the right direction.
         */
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

        if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
                                test_bit(pos_opt, data_line->blk_bitmap))
                return true;

        if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
                data_line->meta_distance--;

        return false;
}

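/* Return the first line on the emeta list if a metadata write can be
 * scheduled without colliding with the given data I/O; NULL otherwise.
 */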
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
                                                    struct nvm_rq *data_rqd)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (meta_line->emeta->mem >= lm->emeta_len[0]) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
                return NULL;

        return meta_line;
}

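/* Submit a user data write, together with any erase needed for the next data
 * line and any metadata write pending for a previous data line.
 */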
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr erase_ppa;
        struct pblk_line *meta_line;
        int err;

        pblk_ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
        if (err) {
                pblk_err(pblk, "could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        meta_line = pblk_should_submit_meta_io(pblk, rqd);

        /* Submit data write for current data line */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                pblk_err(pblk, "data I/O submission failed: %d\n", err);
                return NVM_IO_ERR;
        }

        if (!pblk_ppa_empty(erase_ppa)) {
                /* Submit erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        if (meta_line) {
                /* Submit metadata write for previous data line */
                err = pblk_submit_meta_io(pblk, meta_line);
                if (err) {
                        pblk_err(pblk, "metadata I/O submission failed: %d\n",
                                        err);
                        return NVM_IO_ERR;
                }
        }

        return NVM_IO_OK;
}

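/* Free the pages that were added to the write bio as padding */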
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
}

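/* Form and submit one write request, taking entries either from the resubmit
 * list (failed writes) or from the write buffer. Returns 0 if a request was
 * submitted, 1 if there is nothing to write yet or submission failed.
 */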
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;
        unsigned int resubmit;

        spin_lock(&pblk->resubmit_lock);
        resubmit = !list_empty(&pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

        /* Resubmit failed writes first */
        if (resubmit) {
                struct pblk_c_ctx *r_ctx;

                spin_lock(&pblk->resubmit_lock);
                r_ctx = list_first_entry(&pblk->resubmit_list,
                                        struct pblk_c_ctx, list);
                list_del(&r_ctx->list);
                spin_unlock(&pblk->resubmit_lock);

                secs_avail = r_ctx->nr_valid;
                pos = r_ctx->sentry;

                pblk_prepare_resubmit(pblk, pos, secs_avail);
                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                secs_avail);

                kfree(r_ctx);
        } else {
                /* If there are no sectors in the cache,
                 * flushes (bios without data) will be cleared on
                 * the cache threads
                 */
                secs_avail = pblk_rb_read_count(&pblk->rwb);
                if (!secs_avail)
                        return 1;

                secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
                if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                        return 1;

                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                        secs_to_flush);
                if (secs_to_sync > pblk->max_write_pgs) {
                        pblk_err(pblk, "bad buffer sync calculation\n");
                        return 1;
                }

                secs_to_com = (secs_to_sync > secs_avail) ?
                        secs_avail : secs_to_sync;
                pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
        }

        bio = bio_alloc(GFP_KERNEL, secs_to_sync);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
                                                                secs_avail)) {
                pblk_err(pblk, "corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return 1;
}

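/* Write thread: keep forming and submitting writes, sleeping when
 * pblk_submit_write() reports that nothing was submitted.
 */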
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}