/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
#include "pblk-trace.h"
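
/*
 * Check the consistency of a line's end-of-line metadata (emeta): both the
 * CRC and the magic identifier must match for the emeta to be trusted.
 */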
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}
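
/*
 * Rebuild the L2P entries of a closed line from the lba list stored in its
 * emeta. Sectors recorded as ADDR_EMPTY are invalidated; every other entry
 * is re-inserted into the mapping table.
 */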
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                pblk_alloc_page(pblk, line, pblk->min_write_pgs);
}
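
/*
 * Derive how many sectors were written to an open line by summing the write
 * pointers of its usable chunks, then replay the line's internal write
 * pointer to match, excluding the smeta sectors from the replay.
 */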
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}
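
/*
 * Scratch buffers shared by the OOB recovery reads, together with their
 * DMA addresses, so that each request can reuse a single allocation.
 */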
struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};
static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}
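
/*
 * Completion path for an asynchronous pad write: release the chunk
 * semaphore, free the request and drop one reference on the pad context.
 */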
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}
/* Pad a line using the line bitmap to skip bad blocks. */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_sec_meta *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                goto fail_free_pad;
        }
        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_pad;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret)
                goto fail_free_rqd;

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = meta_list[i].lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, rqd->ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, rqd->ppa_list[0]);
                goto fail_free_bio;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

        kref_put(&pad_rq->ref, pblk_recov_complete);

        if (!wait_for_completion_io_timeout(&pad_rq->wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pblk_err(pblk, "pad write timed out\n");
                ret = -ETIME;
        }

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;

fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
fail_free_pad:
        kfree(pad_rq);
        vfree(data);
        return ret;
}
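
/*
 * Number of sectors to pad before a failed read is retried, capped by the
 * sectors still left in the line.
 */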
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}
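
/*
 * Chunks of a line are filled in LUN order, so their write pointers must
 * be non-increasing across LUNs. A chunk whose write pointer is ahead of
 * the running minimum marks the line as unbalanced.
 */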
static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_lun *rlun;
        struct nvm_chk_meta *chunk;
        struct ppa_addr ppa;
        u64 line_wp;
        int pos, i;

        rlun = &pblk->luns[0];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);
        chunk = &line->chks[pos];
        line_wp = chunk->wp;

        for (i = 1; i < lm->blk_per_line; i++) {
                rlun = &pblk->luns[i];
                ppa = rlun->bppa;
                pos = pblk_ppa_to_pos(geo, ppa);
                chunk = &line->chks[pos];

                if (chunk->wp > line_wp)
                        return 1;
                else if (chunk->wp < line_wp)
                        line_wp = chunk->wp;
        }

        return 0;
}
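
/*
 * Rebuild the L2P entries of an open line by reading the line back and
 * extracting the lbas stored in the sectors' out-of-band (OOB) metadata.
 * A failed read triggers one round of padding before being retried.
 */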
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = 0;
        bool padded = false;
        int rq_ppas, rq_len;
        int i, j, ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line);

        if (pblk_line_wp_is_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;
        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->csecs;

retry_rq:
        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }
        atomic_dec(&pblk->inflight_io);

        /* If a read fails, do a best effort by padding the line and retrying */
        if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret)
                        return ret;

                padded = true;
                goto retry_rq;
        }

        for (i = 0; i < rqd->nr_ppas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                lba_list[paddr++] = cpu_to_le64(lba);
                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}
/* Scan a line for lbas stored in the out-of-band area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
        return ret;
}
/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}
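
/*
 * Walk backwards from the end of the line, skipping bad blocks, to locate
 * the first sector occupied by emeta.
 */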
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}
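
/*
 * A major emeta version mismatch makes the line unusable; a newer minor
 * version is only reported, and only when debugging is enabled.
 */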
static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                          header->version_minor);
#endif

        return 0;
}
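
/*
 * Restore the write amplification counters persisted in emeta: both the
 * running values and the values at the last reset.
 */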
static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);

                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}
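
/*
 * A line counts as written if it is not bad, has a block left for smeta
 * and that block's chunk is no longer in the free state.
 */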
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_FREE)
                return 0;

        return 1;
}
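
/* A line is open if any of its chunks is still in the open state. */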
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}
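
/*
 * Scan-based recovery of the L2P table: read smeta from every line to find
 * previously written lines, order them by sequence number and rebuild the
 * mapping from emeta (closed lines) or from the OOB area (open or corrupted
 * lines). Returns the line left open for new data, NULL if there is none,
 * or an ERR_PTR on an unrecoverable version mismatch.
 */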
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed as not written here */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                 smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
                        valid_uuid = 1;
                }

                if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                           line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                pblk_setup_uuid(pblk);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                 &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                 &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
                pblk_line_replace_data(pblk);
        } else {
                spin_lock(&l_mg->free_lock);
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                         found_lines, recovered_lines);

        return data_line;
}
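
/*
 * Pad the current data line on tear down so that it can be closed and its
 * metadata persisted.
 */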
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}