/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

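/* Complete the user bios attached to each synced cache entry, release the
 * request's resources and advance the write buffer's sync pointer. Returns
 * the new sync position.
 */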
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
							rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

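/* Write completions must be processed in the order in which the entries were
 * placed in the write buffer. A request that completes out of order is parked
 * on pblk->compl_list and drained once the request in front of it has synced.
 */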
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
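/* Note: rqd->ppa_status is treated as a bitmap over ppa_list; each set bit
 * marks a sector that failed to program and whose cache entry must be
 * re-mapped and re-submitted.
 */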
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

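/* Completion handler for data writes: failed requests are routed through the
 * recovery path, which re-maps the failed sectors, before the request is
 * completed.
 */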
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
}

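/* Completion handler for emeta writes. Once all of the line's emeta sectors
 * have synced, the line close work is queued. Note that emeta requests are
 * allocated from the read rqd pool (see pblk_submit_meta_io), so they are
 * freed with READ here.
 */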
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
	struct pblk_lun *rlun = &pblk->luns[pos];
	int sync;

	up(&rlun->wr_sem);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed\n");
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
								pblk->close_wq);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, READ);
}

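/* The out-of-band metadata buffer and the ppa list share a single DMA
 * allocation: ppa_list starts pblk_dma_meta_size bytes into meta_list.
 * Single-sector requests use the inline rqd->ppa_addr instead and skip the
 * list setup.
 */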
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (unlikely(nr_secs == 1))
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	/* Check e_line for NULL before dereferencing it */
	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

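/* Set up a recovery write: the previously failed cache entries (starting at
 * c_ctx->sentry) are mapped onto fresh ppas and re-submitted.
 */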
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

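/* Illustrative behaviour (see pblk_calc_secs): with min_write_pgs = 4, three
 * buffered sectors and no pending flush yield 0 (wait for a full write unit),
 * while three buffered sectors with a pending flush yield 4, the remainder
 * being padded.
 */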
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

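/* Decide whether a metadata I/O can be submitted now without serializing
 * against the in-flight data I/O on the same LUN. Returns 1 when it is safe
 * to submit.
 */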
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
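	/* Sketch of the intent (illustrative numbers): with 16 LUNs and
	 * meta_distance = 8, data being written around LUNs 0-3 places the
	 * metadata I/O around LUNs 8-11. If the two addresses collapse onto
	 * the same LUN (the unlikely case below), the distance is reduced by
	 * one and the metadata I/O is retried on a later pass.
	 */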
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

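/* Build and submit one emeta write: map the next rq_ppas sectors of the meta
 * line, wrap the corresponding chunk of the emeta buffer in a bio, take the
 * target LUN's write semaphore and, once the line is fully mapped, retire it
 * from the emeta list.
 */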
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct pblk_lun *rlun;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
		goto fail_free_bio;
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list it was taken from */
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

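/* Pick the oldest line with outstanding emeta and submit one metadata I/O
 * for it, provided the resulting ppa does not conflict with the data I/O
 * about to be issued.
 */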
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			       int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

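/* Issue order matters here: metadata for the previous line is scheduled
 * before the data write, and when an erase is pending for the next line the
 * data write goes out first, with the erase issued asynchronously afterwards.
 */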
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

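/* One writer iteration: count the buffered sectors, decide how many to sync
 * (padding on a flush), commit that many ring-buffer entries, copy them into
 * a bio and submit the I/O set. Returns 0 when work was done and 1 when the
 * writer thread should sleep.
 */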
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

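/* Writer kthread main loop: drain the write buffer until there is nothing
 * ready to write, then sleep until the thread is kicked again.
 */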
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}