/*
 * drivers/mtd/nand/raw/nand_base.c
 * (imported from the Linux kernel tree)
 */
/*
 *  Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 *      Additional technical information is available on
 *      http://www.linux-mtd.infradead.org/doc/nand.html
 *
 *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 *  Credits:
 *      David Woodhouse for adding multichip support
 *
 *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *      rework for 2K page size chips
 *
 *  TODO:
 *      Enable cached programming for 2k page size chips
 *      Check, if mtd->ecctype should be set to MTD_ECC_HW
 *      if we have HW ECC support.
 *      BBT table is not serialized, has to be fixed
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
29
/* Prefix all pr_*() messages with the module name */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>

#include "internals.h"
51
52 /* Define default oob placement schemes for large and small page devices */
53 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
54                                  struct mtd_oob_region *oobregion)
55 {
56         struct nand_chip *chip = mtd_to_nand(mtd);
57         struct nand_ecc_ctrl *ecc = &chip->ecc;
58
59         if (section > 1)
60                 return -ERANGE;
61
62         if (!section) {
63                 oobregion->offset = 0;
64                 if (mtd->oobsize == 16)
65                         oobregion->length = 4;
66                 else
67                         oobregion->length = 3;
68         } else {
69                 if (mtd->oobsize == 8)
70                         return -ERANGE;
71
72                 oobregion->offset = 6;
73                 oobregion->length = ecc->total - 4;
74         }
75
76         return 0;
77 }
78
79 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
80                                   struct mtd_oob_region *oobregion)
81 {
82         if (section > 1)
83                 return -ERANGE;
84
85         if (mtd->oobsize == 16) {
86                 if (section)
87                         return -ERANGE;
88
89                 oobregion->length = 8;
90                 oobregion->offset = 8;
91         } else {
92                 oobregion->length = 2;
93                 if (!section)
94                         oobregion->offset = 3;
95                 else
96                         oobregion->offset = 6;
97         }
98
99         return 0;
100 }
101
/* Default OOB layout operations for small-page NAND devices */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
107
108 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
109                                  struct mtd_oob_region *oobregion)
110 {
111         struct nand_chip *chip = mtd_to_nand(mtd);
112         struct nand_ecc_ctrl *ecc = &chip->ecc;
113
114         if (section || !ecc->total)
115                 return -ERANGE;
116
117         oobregion->length = ecc->total;
118         oobregion->offset = mtd->oobsize - oobregion->length;
119
120         return 0;
121 }
122
123 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
124                                   struct mtd_oob_region *oobregion)
125 {
126         struct nand_chip *chip = mtd_to_nand(mtd);
127         struct nand_ecc_ctrl *ecc = &chip->ecc;
128
129         if (section)
130                 return -ERANGE;
131
132         oobregion->length = mtd->oobsize - ecc->total - 2;
133         oobregion->offset = 2;
134
135         return 0;
136 }
137
/* Default OOB layout operations for large-page NAND devices */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
143
144 /*
145  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
146  * are placed at a fixed offset.
147  */
148 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
149                                          struct mtd_oob_region *oobregion)
150 {
151         struct nand_chip *chip = mtd_to_nand(mtd);
152         struct nand_ecc_ctrl *ecc = &chip->ecc;
153
154         if (section)
155                 return -ERANGE;
156
157         switch (mtd->oobsize) {
158         case 64:
159                 oobregion->offset = 40;
160                 break;
161         case 128:
162                 oobregion->offset = 80;
163                 break;
164         default:
165                 return -EINVAL;
166         }
167
168         oobregion->length = ecc->total;
169         if (oobregion->offset + oobregion->length > mtd->oobsize)
170                 return -ERANGE;
171
172         return 0;
173 }
174
175 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
176                                           struct mtd_oob_region *oobregion)
177 {
178         struct nand_chip *chip = mtd_to_nand(mtd);
179         struct nand_ecc_ctrl *ecc = &chip->ecc;
180         int ecc_offset = 0;
181
182         if (section < 0 || section > 1)
183                 return -ERANGE;
184
185         switch (mtd->oobsize) {
186         case 64:
187                 ecc_offset = 40;
188                 break;
189         case 128:
190                 ecc_offset = 80;
191                 break;
192         default:
193                 return -EINVAL;
194         }
195
196         if (section == 0) {
197                 oobregion->offset = 2;
198                 oobregion->length = ecc_offset - 2;
199         } else {
200                 oobregion->offset = ecc_offset + ecc->total;
201                 oobregion->length = mtd->oobsize - oobregion->offset;
202         }
203
204         return 0;
205 }
206
/* OOB layout operations for the legacy large-page 1-bit Hamming layout */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
211
212 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
213 {
214         int ret = 0;
215
216         /* Start address must align on block boundary */
217         if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218                 pr_debug("%s: unaligned address\n", __func__);
219                 ret = -EINVAL;
220         }
221
222         /* Length must align on block boundary */
223         if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224                 pr_debug("%s: length not block aligned\n", __func__);
225                 ret = -EINVAL;
226         }
227
228         return ret;
229 }
230
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *      PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 * NOTE(review): the check below only rejects cs strictly greater
	 * than nanddev_ntargets(), so cs == ntargets (one past the last
	 * valid die) slips through — confirm whether >= was intended.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	/* Record the selection before forwarding it to the legacy hook */
	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
255
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Tell the legacy hook first (it may still need the old state) */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* -1 marks "no target selected" (mirrors nand_select_target()) */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
271
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/*
	 * Release the controller and the chip, in the reverse order of
	 * acquisition in nand_get_device().
	 */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
284
285 /**
286  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
287  * @chip: NAND chip object
288  * @ofs: offset from device start
289  *
290  * Check, if the block is bad.
291  */
292 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
293 {
294         struct mtd_info *mtd = nand_to_mtd(chip);
295         int page, page_end, res;
296         u8 bad;
297
298         if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
299                 ofs += mtd->erasesize - mtd->writesize;
300
301         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
302         page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
303
304         for (; page < page_end; page++) {
305                 res = chip->ecc.read_oob(chip, page);
306                 if (res < 0)
307                         return res;
308
309                 bad = chip->oob_poi[chip->badblockpos];
310
311                 if (likely(chip->badblockbits == 8))
312                         res = bad != 0xFF;
313                 else
314                         res = hweight8(bad) < chip->badblockbits;
315                 if (res)
316                         return res;
317         }
318
319         return 0;
320 }
321
322 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
323 {
324         if (chip->legacy.block_bad)
325                 return chip->legacy.block_bad(chip, ofs);
326
327         return nand_block_bad(chip, ofs);
328 }
329
330 /**
331  * nand_get_device - [GENERIC] Get chip for selected access
332  * @chip: NAND chip structure
333  *
334  * Lock the device and its controller for exclusive access
335  *
336  * Return: -EBUSY if the chip has been suspended, 0 otherwise
337  */
338 static int nand_get_device(struct nand_chip *chip)
339 {
340         mutex_lock(&chip->lock);
341         if (chip->suspended) {
342                 mutex_unlock(&chip->lock);
343                 return -EBUSY;
344         }
345         mutex_lock(&chip->controller->lock);
346
347         return 0;
348 }
349
350 /**
351  * nand_check_wp - [GENERIC] check if the chip is write protected
352  * @chip: NAND chip object
353  *
354  * Check, if the device is write protected. The function expects, that the
355  * device is already selected.
356  */
357 static int nand_check_wp(struct nand_chip *chip)
358 {
359         u8 status;
360         int ret;
361
362         /* Broken xD cards report WP despite being writable */
363         if (chip->options & NAND_BROKEN_XD)
364                 return 0;
365
366         /* Check the WP bit */
367         ret = nand_status_op(chip, &status);
368         if (ret)
369                 return ret;
370
371         return status & NAND_STATUS_WP ? 0 : 1;
372 }
373
374 /**
375  * nand_fill_oob - [INTERN] Transfer client buffer to oob
376  * @chip: NAND chip object
377  * @oob: oob data buffer
378  * @len: oob data write length
379  * @ops: oob ops structure
380  */
381 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
382                               struct mtd_oob_ops *ops)
383 {
384         struct mtd_info *mtd = nand_to_mtd(chip);
385         int ret;
386
387         /*
388          * Initialise to all 0xFF, to avoid the possibility of left over OOB
389          * data from a previous OOB read.
390          */
391         memset(chip->oob_poi, 0xff, mtd->oobsize);
392
393         switch (ops->mode) {
394
395         case MTD_OPS_PLACE_OOB:
396         case MTD_OPS_RAW:
397                 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
398                 return oob + len;
399
400         case MTD_OPS_AUTO_OOB:
401                 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
402                                                   ops->ooboffs, len);
403                 BUG_ON(ret);
404                 return oob + len;
405
406         default:
407                 BUG();
408         }
409         return NULL;
410 }
411
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 *
 * Returns 0 on success, -EINVAL if the request would run past the end of
 * the per-page OOB area, -EROFS if the device is write protected, or the
 * error returned by the reset/OOB-write operations.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* OOB bytes usable per page in this mode (raw vs. auto placement) */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Die addressed by @to */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Copy caller OOB data into chip->oob_poi as dictated by ops->mode */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
481
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };	/* all-zero bytes serve as the marker */
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: align the offset and write a full word */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	/*
	 * Write the marker to one page, or to two consecutive pages when
	 * NAND_BBT_SCAN2NDPAGE is set. Keep the first error but still try
	 * the remaining page.
	 */
	do {
		res = nand_do_write_oob(chip, ofs, &ops);
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
523
524 /**
525  * nand_markbad_bbm - mark a block by updating the BBM
526  * @chip: NAND chip object
527  * @ofs: offset of the block to mark bad
528  */
529 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
530 {
531         if (chip->legacy.block_markbad)
532                 return chip->legacy.block_markbad(chip, ofs);
533
534         return nand_default_block_markbad(chip, ofs);
535 }
536
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase result is deliberately ignored: best effort only */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Keep the first error (from the OOB write, if any) */
		if (!ret)
			ret = res;
	}

	/* Only count the block when every requested step succeeded */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
591
592 /**
593  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
594  * @mtd: MTD device structure
595  * @ofs: offset from device start
596  *
597  * Check if the block is marked as reserved.
598  */
599 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
600 {
601         struct nand_chip *chip = mtd_to_nand(mtd);
602
603         if (!chip->bbt)
604                 return 0;
605         /* Return info from the table */
606         return nand_isreserved_bbt(chip, ofs);
607 }
608
609 /**
610  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
611  * @chip: NAND chip object
612  * @ofs: offset from device start
613  * @allowbbt: 1, if its allowed to access the bbt area
614  *
615  * Check, if the block is bad. Either by reading the bad block table or
616  * calling of the scan function.
617  */
618 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
619 {
620         /* Return info from the table */
621         if (chip->bbt)
622                 return nand_isbad_bbt(chip, ofs, allowbbt);
623
624         return nand_isbad_bbm(chip, ofs);
625 }
626
627 /**
628  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
629  * @chip: NAND chip structure
630  * @timeout_ms: Timeout in ms
631  *
632  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
633  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
634  * returned.
635  *
636  * This helper is intended to be used when the controller does not have access
637  * to the NAND R/B pin.
638  *
639  * Be aware that calling this helper from an ->exec_op() implementation means
640  * ->exec_op() must be re-entrant.
641  *
642  * Return 0 if the NAND chip is ready, a negative error otherwise.
643  */
644 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
645 {
646         const struct nand_sdr_timings *timings;
647         u8 status = 0;
648         int ret;
649
650         if (!nand_has_exec_op(chip))
651                 return -ENOTSUPP;
652
653         /* Wait tWB before polling the STATUS reg. */
654         timings = nand_get_sdr_timings(&chip->data_interface);
655         ndelay(PSEC_TO_NSEC(timings->tWB_max));
656
657         ret = nand_status_op(chip, NULL);
658         if (ret)
659                 return ret;
660
661         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
662         do {
663                 ret = nand_read_data_op(chip, &status, sizeof(status), true);
664                 if (ret)
665                         break;
666
667                 if (status & NAND_STATUS_READY)
668                         break;
669
670                 /*
671                  * Typical lowest execution time for a tR on most NANDs is 10us,
672                  * use this as polling delay before doing something smarter (ie.
673                  * deriving a delay from the timeout value, timeout_ms/ratio).
674                  */
675                 udelay(10);
676         } while (time_before(jiffies, timeout_ms));
677
678         /*
679          * We have to exit READ_STATUS mode in order to read real data on the
680          * bus in case the WAITRDY instruction is preceding a DATA_IN
681          * instruction.
682          */
683         nand_exit_status_op(chip);
684
685         if (ret)
686                 return ret;
687
688         return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
689 };
690 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
691
692 /**
693  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
694  * @chip: NAND chip structure
695  * @gpiod: GPIO descriptor of R/B pin
696  * @timeout_ms: Timeout in ms
697  *
698  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
699  * whitin the specified timeout, -ETIMEDOUT is returned.
700  *
701  * This helper is intended to be used when the controller has access to the
702  * NAND R/B pin over GPIO.
703  *
704  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
705  */
706 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
707                       unsigned long timeout_ms)
708 {
709         /* Wait until R/B pin indicates chip is ready or timeout occurs */
710         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
711         do {
712                 if (gpiod_get_value_cansleep(gpiod))
713                         return 0;
714
715                 cond_resched();
716         } while (time_before(jiffies, timeout_ms));
717
718         return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
719 };
720 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
721
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	/* Busy-poll in 1ms steps: no sleeping allowed in this context */
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			/* Driver exposes a ready/busy line query */
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* No hook: read the STATUS byte off the bus instead */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
753
754 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
755 {
756         return (chip->parameters.supports_set_get_features &&
757                 test_bit(addr, chip->parameters.get_feature_list));
758 }
759
760 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
761 {
762         return (chip->parameters.supports_set_get_features &&
763                 test_bit(addr, chip->parameters.set_feature_list));
764 }
765
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Nothing to do when the controller has no timing setup hook */
	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	/* Apply SDR mode 0 on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
804
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	/* SET_FEATURES payload: requested timing mode in the first byte */
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Re-use the buffer to read back what the chip actually applied */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
876
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		/* Treat modes 0..default as the candidate set */
		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Walk from the fastest supported mode down to mode 0 */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	/* Note: returns 0 even when no mode was accepted by the controller */
	return 0;
}
932
933 /**
934  * nand_fill_column_cycles - fill the column cycles of an address
935  * @chip: The NAND chip
936  * @addrs: Array of address cycles to fill
937  * @offset_in_page: The offset in the page
938  *
939  * Fills the first or the first two bytes of the @addrs field depending
940  * on the NAND bus width and the page size.
941  *
942  * Returns the number of cycles needed to encode the column, or a negative
943  * error code in case one of the arguments is invalid.
944  */
945 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
946                                    unsigned int offset_in_page)
947 {
948         struct mtd_info *mtd = nand_to_mtd(chip);
949
950         /* Make sure the offset is less than the actual page size. */
951         if (offset_in_page > mtd->writesize + mtd->oobsize)
952                 return -EINVAL;
953
954         /*
955          * On small page NANDs, there's a dedicated command to access the OOB
956          * area, and the column address is relative to the start of the OOB
957          * area, not the start of the page. Asjust the address accordingly.
958          */
959         if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
960                 offset_in_page -= mtd->writesize;
961
962         /*
963          * The offset in page is expressed in bytes, if the NAND bus is 16-bit
964          * wide, then it must be divided by 2.
965          */
966         if (chip->options & NAND_BUSWIDTH_16) {
967                 if (WARN_ON(offset_in_page % 2))
968                         return -EINVAL;
969
970                 offset_in_page /= 2;
971         }
972
973         addrs[0] = offset_in_page;
974
975         /*
976          * Small page NANDs use 1 cycle for the columns, while large page NANDs
977          * need 2
978          */
979         if (mtd->writesize <= 512)
980                 return 1;
981
982         addrs[1] = offset_in_page >> 8;
983
984         return 2;
985 }
986
/*
 * READ PAGE implementation for small page (<= 512 bytes) devices, using the
 * controller ->exec_op() interface. The CS line is not touched here.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small page devices select the page region through the opcode:
	 * READOOB for the OOB area, READ1 for the second half of an 8-bit
	 * wide page, READ0 otherwise.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/*
	 * Small pages always use a single column cycle
	 * (nand_fill_column_cycles() returned 1), so the row address
	 * starts at addrs[1].
	 */
	addrs[1] = page;
	addrs[2] = page >> 8;

	/* High-density chips need a third row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1029
/*
 * READ PAGE implementation for large page (> 512 bytes) devices, using the
 * controller ->exec_op() interface. The CS line is not touched here.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/*
	 * Large pages use two column cycles (nand_fill_column_cycles()
	 * returned 2), so the row address starts at addrs[2].
	 */
	addrs[2] = page;
	addrs[3] = page >> 8;

	/* High-density chips need a third row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1066
1067 /**
1068  * nand_read_page_op - Do a READ PAGE operation
1069  * @chip: The NAND chip
1070  * @page: page to read
1071  * @offset_in_page: offset within the page
1072  * @buf: buffer used to store the data
1073  * @len: length of the buffer
1074  *
1075  * This function issues a READ PAGE operation.
1076  * This function does not select/unselect the CS line.
1077  *
1078  * Returns 0 on success, a negative error code otherwise.
1079  */
1080 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1081                       unsigned int offset_in_page, void *buf, unsigned int len)
1082 {
1083         struct mtd_info *mtd = nand_to_mtd(chip);
1084
1085         if (len && !buf)
1086                 return -EINVAL;
1087
1088         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1089                 return -EINVAL;
1090
1091         if (nand_has_exec_op(chip)) {
1092                 if (mtd->writesize > 512)
1093                         return nand_lp_exec_read_page_op(chip, page,
1094                                                          offset_in_page, buf,
1095                                                          len);
1096
1097                 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1098                                                  buf, len);
1099         }
1100
1101         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1102         if (len)
1103                 chip->legacy.read_buf(chip, buf, len);
1104
1105         return 0;
1106 }
1107 EXPORT_SYMBOL_GPL(nand_read_page_op);
1108
1109 /**
1110  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1111  * @chip: The NAND chip
1112  * @page: parameter page to read
1113  * @buf: buffer used to store the data
1114  * @len: length of the buffer
1115  *
1116  * This function issues a READ PARAMETER PAGE operation.
1117  * This function does not select/unselect the CS line.
1118  *
1119  * Returns 0 on success, a negative error code otherwise.
1120  */
1121 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1122                             unsigned int len)
1123 {
1124         unsigned int i;
1125         u8 *p = buf;
1126
1127         if (len && !buf)
1128                 return -EINVAL;
1129
1130         if (nand_has_exec_op(chip)) {
1131                 const struct nand_sdr_timings *sdr =
1132                         nand_get_sdr_timings(&chip->data_interface);
1133                 struct nand_op_instr instrs[] = {
1134                         NAND_OP_CMD(NAND_CMD_PARAM, 0),
1135                         NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1136                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1137                                          PSEC_TO_NSEC(sdr->tRR_min)),
1138                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1139                 };
1140                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1141
1142                 /* Drop the DATA_IN instruction if len is set to 0. */
1143                 if (!len)
1144                         op.ninstrs--;
1145
1146                 return nand_exec_op(chip, &op);
1147         }
1148
1149         chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1150         for (i = 0; i < len; i++)
1151                 p[i] = chip->legacy.read_byte(chip);
1152
1153         return 0;
1154 }
1155
1156 /**
1157  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1158  * @chip: The NAND chip
1159  * @offset_in_page: offset within the page
1160  * @buf: buffer used to store the data
1161  * @len: length of the buffer
1162  * @force_8bit: force 8-bit bus access
1163  *
1164  * This function issues a CHANGE READ COLUMN operation.
1165  * This function does not select/unselect the CS line.
1166  *
1167  * Returns 0 on success, a negative error code otherwise.
1168  */
1169 int nand_change_read_column_op(struct nand_chip *chip,
1170                                unsigned int offset_in_page, void *buf,
1171                                unsigned int len, bool force_8bit)
1172 {
1173         struct mtd_info *mtd = nand_to_mtd(chip);
1174
1175         if (len && !buf)
1176                 return -EINVAL;
1177
1178         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1179                 return -EINVAL;
1180
1181         /* Small page NANDs do not support column change. */
1182         if (mtd->writesize <= 512)
1183                 return -ENOTSUPP;
1184
1185         if (nand_has_exec_op(chip)) {
1186                 const struct nand_sdr_timings *sdr =
1187                         nand_get_sdr_timings(&chip->data_interface);
1188                 u8 addrs[2] = {};
1189                 struct nand_op_instr instrs[] = {
1190                         NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1191                         NAND_OP_ADDR(2, addrs, 0),
1192                         NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1193                                     PSEC_TO_NSEC(sdr->tCCS_min)),
1194                         NAND_OP_DATA_IN(len, buf, 0),
1195                 };
1196                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1197                 int ret;
1198
1199                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1200                 if (ret < 0)
1201                         return ret;
1202
1203                 /* Drop the DATA_IN instruction if len is set to 0. */
1204                 if (!len)
1205                         op.ninstrs--;
1206
1207                 instrs[3].ctx.data.force_8bit = force_8bit;
1208
1209                 return nand_exec_op(chip, &op);
1210         }
1211
1212         chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1213         if (len)
1214                 chip->legacy.read_buf(chip, buf, len);
1215
1216         return 0;
1217 }
1218 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1219
1220 /**
1221  * nand_read_oob_op - Do a READ OOB operation
1222  * @chip: The NAND chip
1223  * @page: page to read
1224  * @offset_in_oob: offset within the OOB area
1225  * @buf: buffer used to store the data
1226  * @len: length of the buffer
1227  *
1228  * This function issues a READ OOB operation.
1229  * This function does not select/unselect the CS line.
1230  *
1231  * Returns 0 on success, a negative error code otherwise.
1232  */
1233 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1234                      unsigned int offset_in_oob, void *buf, unsigned int len)
1235 {
1236         struct mtd_info *mtd = nand_to_mtd(chip);
1237
1238         if (len && !buf)
1239                 return -EINVAL;
1240
1241         if (offset_in_oob + len > mtd->oobsize)
1242                 return -EINVAL;
1243
1244         if (nand_has_exec_op(chip))
1245                 return nand_read_page_op(chip, page,
1246                                          mtd->writesize + offset_in_oob,
1247                                          buf, len);
1248
1249         chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1250         if (len)
1251                 chip->legacy.read_buf(chip, buf, len);
1252
1253         return 0;
1254 }
1255 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1256
/*
 * PROG PAGE implementation based on the ->exec_op() interface.
 *
 * When @prog is false only the SEQIN + address (+ optional data) phase is
 * issued, leaving the program cycle to be completed later (e.g. by
 * nand_prog_page_end_op()). When @prog is true the full cycle runs and the
 * NAND status byte is returned on success (callers check NAND_STATUS_FAIL).
 * Returns a negative error code on failure.
 * The CS line is not touched here.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	/* 1 or 2 column cycles depending on the page size. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles right after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Full program cycle: report the NAND status byte to the caller. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1331
1332 /**
1333  * nand_prog_page_begin_op - starts a PROG PAGE operation
1334  * @chip: The NAND chip
1335  * @page: page to write
1336  * @offset_in_page: offset within the page
1337  * @buf: buffer containing the data to write to the page
1338  * @len: length of the buffer
1339  *
1340  * This function issues the first half of a PROG PAGE operation.
1341  * This function does not select/unselect the CS line.
1342  *
1343  * Returns 0 on success, a negative error code otherwise.
1344  */
1345 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1346                             unsigned int offset_in_page, const void *buf,
1347                             unsigned int len)
1348 {
1349         struct mtd_info *mtd = nand_to_mtd(chip);
1350
1351         if (len && !buf)
1352                 return -EINVAL;
1353
1354         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1355                 return -EINVAL;
1356
1357         if (nand_has_exec_op(chip))
1358                 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1359                                               len, false);
1360
1361         chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1362
1363         if (buf)
1364                 chip->legacy.write_buf(chip, buf, len);
1365
1366         return 0;
1367 }
1368 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1369
1370 /**
1371  * nand_prog_page_end_op - ends a PROG PAGE operation
1372  * @chip: The NAND chip
1373  *
1374  * This function issues the second half of a PROG PAGE operation.
1375  * This function does not select/unselect the CS line.
1376  *
1377  * Returns 0 on success, a negative error code otherwise.
1378  */
1379 int nand_prog_page_end_op(struct nand_chip *chip)
1380 {
1381         int ret;
1382         u8 status;
1383
1384         if (nand_has_exec_op(chip)) {
1385                 const struct nand_sdr_timings *sdr =
1386                         nand_get_sdr_timings(&chip->data_interface);
1387                 struct nand_op_instr instrs[] = {
1388                         NAND_OP_CMD(NAND_CMD_PAGEPROG,
1389                                     PSEC_TO_NSEC(sdr->tWB_max)),
1390                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1391                 };
1392                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1393
1394                 ret = nand_exec_op(chip, &op);
1395                 if (ret)
1396                         return ret;
1397
1398                 ret = nand_status_op(chip, &status);
1399                 if (ret)
1400                         return ret;
1401         } else {
1402                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1403                 ret = chip->legacy.waitfunc(chip);
1404                 if (ret < 0)
1405                         return ret;
1406
1407                 status = ret;
1408         }
1409
1410         if (status & NAND_STATUS_FAIL)
1411                 return -EIO;
1412
1413         return 0;
1414 }
1415 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1416
1417 /**
1418  * nand_prog_page_op - Do a full PROG PAGE operation
1419  * @chip: The NAND chip
1420  * @page: page to write
1421  * @offset_in_page: offset within the page
1422  * @buf: buffer containing the data to write to the page
1423  * @len: length of the buffer
1424  *
1425  * This function issues a full PROG PAGE operation.
1426  * This function does not select/unselect the CS line.
1427  *
1428  * Returns 0 on success, a negative error code otherwise.
1429  */
1430 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1431                       unsigned int offset_in_page, const void *buf,
1432                       unsigned int len)
1433 {
1434         struct mtd_info *mtd = nand_to_mtd(chip);
1435         int status;
1436
1437         if (!len || !buf)
1438                 return -EINVAL;
1439
1440         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1441                 return -EINVAL;
1442
1443         if (nand_has_exec_op(chip)) {
1444                 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1445                                                 len, true);
1446         } else {
1447                 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1448                                      page);
1449                 chip->legacy.write_buf(chip, buf, len);
1450                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1451                 status = chip->legacy.waitfunc(chip);
1452         }
1453
1454         if (status & NAND_STATUS_FAIL)
1455                 return -EIO;
1456
1457         return 0;
1458 }
1459 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1460
1461 /**
1462  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1463  * @chip: The NAND chip
1464  * @offset_in_page: offset within the page
1465  * @buf: buffer containing the data to send to the NAND
1466  * @len: length of the buffer
1467  * @force_8bit: force 8-bit bus access
1468  *
1469  * This function issues a CHANGE WRITE COLUMN operation.
1470  * This function does not select/unselect the CS line.
1471  *
1472  * Returns 0 on success, a negative error code otherwise.
1473  */
1474 int nand_change_write_column_op(struct nand_chip *chip,
1475                                 unsigned int offset_in_page,
1476                                 const void *buf, unsigned int len,
1477                                 bool force_8bit)
1478 {
1479         struct mtd_info *mtd = nand_to_mtd(chip);
1480
1481         if (len && !buf)
1482                 return -EINVAL;
1483
1484         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1485                 return -EINVAL;
1486
1487         /* Small page NANDs do not support column change. */
1488         if (mtd->writesize <= 512)
1489                 return -ENOTSUPP;
1490
1491         if (nand_has_exec_op(chip)) {
1492                 const struct nand_sdr_timings *sdr =
1493                         nand_get_sdr_timings(&chip->data_interface);
1494                 u8 addrs[2];
1495                 struct nand_op_instr instrs[] = {
1496                         NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1497                         NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1498                         NAND_OP_DATA_OUT(len, buf, 0),
1499                 };
1500                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1501                 int ret;
1502
1503                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1504                 if (ret < 0)
1505                         return ret;
1506
1507                 instrs[2].ctx.data.force_8bit = force_8bit;
1508
1509                 /* Drop the DATA_OUT instruction if len is set to 0. */
1510                 if (!len)
1511                         op.ninstrs--;
1512
1513                 return nand_exec_op(chip, &op);
1514         }
1515
1516         chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1517         if (len)
1518                 chip->legacy.write_buf(chip, buf, len);
1519
1520         return 0;
1521 }
1522 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1523
1524 /**
1525  * nand_readid_op - Do a READID operation
1526  * @chip: The NAND chip
1527  * @addr: address cycle to pass after the READID command
1528  * @buf: buffer used to store the ID
1529  * @len: length of the buffer
1530  *
1531  * This function sends a READID command and reads back the ID returned by the
1532  * NAND.
1533  * This function does not select/unselect the CS line.
1534  *
1535  * Returns 0 on success, a negative error code otherwise.
1536  */
1537 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1538                    unsigned int len)
1539 {
1540         unsigned int i;
1541         u8 *id = buf;
1542
1543         if (len && !buf)
1544                 return -EINVAL;
1545
1546         if (nand_has_exec_op(chip)) {
1547                 const struct nand_sdr_timings *sdr =
1548                         nand_get_sdr_timings(&chip->data_interface);
1549                 struct nand_op_instr instrs[] = {
1550                         NAND_OP_CMD(NAND_CMD_READID, 0),
1551                         NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1552                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1553                 };
1554                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1555
1556                 /* Drop the DATA_IN instruction if len is set to 0. */
1557                 if (!len)
1558                         op.ninstrs--;
1559
1560                 return nand_exec_op(chip, &op);
1561         }
1562
1563         chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1564
1565         for (i = 0; i < len; i++)
1566                 id[i] = chip->legacy.read_byte(chip);
1567
1568         return 0;
1569 }
1570 EXPORT_SYMBOL_GPL(nand_readid_op);
1571
1572 /**
1573  * nand_status_op - Do a STATUS operation
1574  * @chip: The NAND chip
1575  * @status: out variable to store the NAND status
1576  *
1577  * This function sends a STATUS command and reads back the status returned by
1578  * the NAND.
1579  * This function does not select/unselect the CS line.
1580  *
1581  * Returns 0 on success, a negative error code otherwise.
1582  */
1583 int nand_status_op(struct nand_chip *chip, u8 *status)
1584 {
1585         if (nand_has_exec_op(chip)) {
1586                 const struct nand_sdr_timings *sdr =
1587                         nand_get_sdr_timings(&chip->data_interface);
1588                 struct nand_op_instr instrs[] = {
1589                         NAND_OP_CMD(NAND_CMD_STATUS,
1590                                     PSEC_TO_NSEC(sdr->tADL_min)),
1591                         NAND_OP_8BIT_DATA_IN(1, status, 0),
1592                 };
1593                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1594
1595                 if (!status)
1596                         op.ninstrs--;
1597
1598                 return nand_exec_op(chip, &op);
1599         }
1600
1601         chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1602         if (status)
1603                 *status = chip->legacy.read_byte(chip);
1604
1605         return 0;
1606 }
1607 EXPORT_SYMBOL_GPL(nand_status_op);
1608
1609 /**
1610  * nand_exit_status_op - Exit a STATUS operation
1611  * @chip: The NAND chip
1612  *
1613  * This function sends a READ0 command to cancel the effect of the STATUS
1614  * command to avoid reading only the status until a new read command is sent.
1615  *
1616  * This function does not select/unselect the CS line.
1617  *
1618  * Returns 0 on success, a negative error code otherwise.
1619  */
1620 int nand_exit_status_op(struct nand_chip *chip)
1621 {
1622         if (nand_has_exec_op(chip)) {
1623                 struct nand_op_instr instrs[] = {
1624                         NAND_OP_CMD(NAND_CMD_READ0, 0),
1625                 };
1626                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1627
1628                 return nand_exec_op(chip, &op);
1629         }
1630
1631         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1632
1633         return 0;
1634 }
1635
1636 /**
1637  * nand_erase_op - Do an erase operation
1638  * @chip: The NAND chip
1639  * @eraseblock: block to erase
1640  *
1641  * This function sends an ERASE command and waits for the NAND to be ready
1642  * before returning.
1643  * This function does not select/unselect the CS line.
1644  *
1645  * Returns 0 on success, a negative error code otherwise.
1646  */
1647 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1648 {
1649         unsigned int page = eraseblock <<
1650                             (chip->phys_erase_shift - chip->page_shift);
1651         int ret;
1652         u8 status;
1653
1654         if (nand_has_exec_op(chip)) {
1655                 const struct nand_sdr_timings *sdr =
1656                         nand_get_sdr_timings(&chip->data_interface);
1657                 u8 addrs[3] = { page, page >> 8, page >> 16 };
1658                 struct nand_op_instr instrs[] = {
1659                         NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1660                         NAND_OP_ADDR(2, addrs, 0),
1661                         NAND_OP_CMD(NAND_CMD_ERASE2,
1662                                     PSEC_TO_MSEC(sdr->tWB_max)),
1663                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1664                 };
1665                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1666
1667                 if (chip->options & NAND_ROW_ADDR_3)
1668                         instrs[1].ctx.addr.naddrs++;
1669
1670                 ret = nand_exec_op(chip, &op);
1671                 if (ret)
1672                         return ret;
1673
1674                 ret = nand_status_op(chip, &status);
1675                 if (ret)
1676                         return ret;
1677         } else {
1678                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1679                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1680
1681                 ret = chip->legacy.waitfunc(chip);
1682                 if (ret < 0)
1683                         return ret;
1684
1685                 status = ret;
1686         }
1687
1688         if (status & NAND_STATUS_FAIL)
1689                 return -EIO;
1690
1691         return 0;
1692 }
1693 EXPORT_SYMBOL_GPL(nand_erase_op);
1694
1695 /**
1696  * nand_set_features_op - Do a SET FEATURES operation
1697  * @chip: The NAND chip
1698  * @feature: feature id
1699  * @data: 4 bytes of data
1700  *
1701  * This function sends a SET FEATURES command and waits for the NAND to be
1702  * ready before returning.
1703  * This function does not select/unselect the CS line.
1704  *
1705  * Returns 0 on success, a negative error code otherwise.
1706  */
1707 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1708                                 const void *data)
1709 {
1710         const u8 *params = data;
1711         int i, ret;
1712
1713         if (nand_has_exec_op(chip)) {
1714                 const struct nand_sdr_timings *sdr =
1715                         nand_get_sdr_timings(&chip->data_interface);
1716                 struct nand_op_instr instrs[] = {
1717                         NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1718                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1719                         NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1720                                               PSEC_TO_NSEC(sdr->tWB_max)),
1721                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1722                 };
1723                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1724
1725                 return nand_exec_op(chip, &op);
1726         }
1727
1728         chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1729         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1730                 chip->legacy.write_byte(chip, params[i]);
1731
1732         ret = chip->legacy.waitfunc(chip);
1733         if (ret < 0)
1734                 return ret;
1735
1736         if (ret & NAND_STATUS_FAIL)
1737                 return -EIO;
1738
1739         return 0;
1740 }
1741
1742 /**
1743  * nand_get_features_op - Do a GET FEATURES operation
1744  * @chip: The NAND chip
1745  * @feature: feature id
1746  * @data: 4 bytes of data
1747  *
1748  * This function sends a GET FEATURES command and waits for the NAND to be
1749  * ready before returning.
1750  * This function does not select/unselect the CS line.
1751  *
1752  * Returns 0 on success, a negative error code otherwise.
1753  */
1754 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1755                                 void *data)
1756 {
1757         u8 *params = data;
1758         int i;
1759
1760         if (nand_has_exec_op(chip)) {
1761                 const struct nand_sdr_timings *sdr =
1762                         nand_get_sdr_timings(&chip->data_interface);
1763                 struct nand_op_instr instrs[] = {
1764                         NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1765                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1766                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1767                                          PSEC_TO_NSEC(sdr->tRR_min)),
1768                         NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1769                                              data, 0),
1770                 };
1771                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1772
1773                 return nand_exec_op(chip, &op);
1774         }
1775
1776         chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1777         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1778                 params[i] = chip->legacy.read_byte(chip);
1779
1780         return 0;
1781 }
1782
1783 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1784                             unsigned int delay_ns)
1785 {
1786         if (nand_has_exec_op(chip)) {
1787                 struct nand_op_instr instrs[] = {
1788                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1789                                          PSEC_TO_NSEC(delay_ns)),
1790                 };
1791                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1792
1793                 return nand_exec_op(chip, &op);
1794         }
1795
1796         /* Apply delay or wait for ready/busy pin */
1797         if (!chip->legacy.dev_ready)
1798                 udelay(chip->legacy.chip_delay);
1799         else
1800                 nand_wait_ready(chip);
1801
1802         return 0;
1803 }
1804
1805 /**
1806  * nand_reset_op - Do a reset operation
1807  * @chip: The NAND chip
1808  *
1809  * This function sends a RESET command and waits for the NAND to be ready
1810  * before returning.
1811  * This function does not select/unselect the CS line.
1812  *
1813  * Returns 0 on success, a negative error code otherwise.
1814  */
1815 int nand_reset_op(struct nand_chip *chip)
1816 {
1817         if (nand_has_exec_op(chip)) {
1818                 const struct nand_sdr_timings *sdr =
1819                         nand_get_sdr_timings(&chip->data_interface);
1820                 struct nand_op_instr instrs[] = {
1821                         NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1822                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1823                 };
1824                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1825
1826                 return nand_exec_op(chip, &op);
1827         }
1828
1829         chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1830
1831         return 0;
1832 }
1833 EXPORT_SYMBOL_GPL(nand_reset_op);
1834
1835 /**
1836  * nand_read_data_op - Read data from the NAND
1837  * @chip: The NAND chip
1838  * @buf: buffer used to store the data
1839  * @len: length of the buffer
1840  * @force_8bit: force 8-bit bus access
1841  *
1842  * This function does a raw data read on the bus. Usually used after launching
1843  * another NAND operation like nand_read_page_op().
1844  * This function does not select/unselect the CS line.
1845  *
1846  * Returns 0 on success, a negative error code otherwise.
1847  */
1848 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1849                       bool force_8bit)
1850 {
1851         if (!len || !buf)
1852                 return -EINVAL;
1853
1854         if (nand_has_exec_op(chip)) {
1855                 struct nand_op_instr instrs[] = {
1856                         NAND_OP_DATA_IN(len, buf, 0),
1857                 };
1858                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1859
1860                 instrs[0].ctx.data.force_8bit = force_8bit;
1861
1862                 return nand_exec_op(chip, &op);
1863         }
1864
1865         if (force_8bit) {
1866                 u8 *p = buf;
1867                 unsigned int i;
1868
1869                 for (i = 0; i < len; i++)
1870                         p[i] = chip->legacy.read_byte(chip);
1871         } else {
1872                 chip->legacy.read_buf(chip, buf, len);
1873         }
1874
1875         return 0;
1876 }
1877 EXPORT_SYMBOL_GPL(nand_read_data_op);
1878
1879 /**
1880  * nand_write_data_op - Write data from the NAND
1881  * @chip: The NAND chip
1882  * @buf: buffer containing the data to send on the bus
1883  * @len: length of the buffer
1884  * @force_8bit: force 8-bit bus access
1885  *
1886  * This function does a raw data write on the bus. Usually used after launching
1887  * another NAND operation like nand_write_page_begin_op().
1888  * This function does not select/unselect the CS line.
1889  *
1890  * Returns 0 on success, a negative error code otherwise.
1891  */
1892 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1893                        unsigned int len, bool force_8bit)
1894 {
1895         if (!len || !buf)
1896                 return -EINVAL;
1897
1898         if (nand_has_exec_op(chip)) {
1899                 struct nand_op_instr instrs[] = {
1900                         NAND_OP_DATA_OUT(len, buf, 0),
1901                 };
1902                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1903
1904                 instrs[0].ctx.data.force_8bit = force_8bit;
1905
1906                 return nand_exec_op(chip, &op);
1907         }
1908
1909         if (force_8bit) {
1910                 const u8 *p = buf;
1911                 unsigned int i;
1912
1913                 for (i = 0; i < len; i++)
1914                         chip->legacy.write_byte(chip, p[i]);
1915         } else {
1916                 chip->legacy.write_buf(chip, buf, len);
1917         }
1918
1919         return 0;
1920 }
1921 EXPORT_SYMBOL_GPL(nand_write_data_op);
1922
/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 * @subop always points into @instrs and is advanced chunk by chunk as
 * patterns are matched.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction sequence */
	unsigned int ninstrs;			/* number of entries in @instrs */
	struct nand_subop subop;		/* current chunk handed to the driver */
};
1937
1938 /**
1939  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1940  * @pat: the parser pattern element that matches @instr
1941  * @instr: pointer to the instruction to check
1942  * @start_offset: this is an in/out parameter. If @instr has already been
1943  *                split, then @start_offset is the offset from which to start
1944  *                (either an address cycle or an offset in the data buffer).
1945  *                Conversely, if the function returns true (ie. instr must be
1946  *                split), this parameter is updated to point to the first
1947  *                data/address cycle that has not been taken care of.
1948  *
1949  * Some NAND controllers are limited and cannot send X address cycles with a
1950  * unique operation, or cannot read/write more than Y bytes at the same time.
1951  * In this case, split the instruction that does not fit in a single
1952  * controller-operation into two or more chunks.
1953  *
1954  * Returns true if the instruction must be split, false otherwise.
1955  * The @start_offset parameter is also updated to the offset at which the next
1956  * bundle of instruction must start (if an address or a data instruction).
1957  */
1958 static bool
1959 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1960                                 const struct nand_op_instr *instr,
1961                                 unsigned int *start_offset)
1962 {
1963         switch (pat->type) {
1964         case NAND_OP_ADDR_INSTR:
1965                 if (!pat->ctx.addr.maxcycles)
1966                         break;
1967
1968                 if (instr->ctx.addr.naddrs - *start_offset >
1969                     pat->ctx.addr.maxcycles) {
1970                         *start_offset += pat->ctx.addr.maxcycles;
1971                         return true;
1972                 }
1973                 break;
1974
1975         case NAND_OP_DATA_IN_INSTR:
1976         case NAND_OP_DATA_OUT_INSTR:
1977                 if (!pat->ctx.data.maxlen)
1978                         break;
1979
1980                 if (instr->ctx.data.len - *start_offset >
1981                     pat->ctx.data.maxlen) {
1982                         *start_offset += pat->ctx.data.maxlen;
1983                         return true;
1984                 }
1985                 break;
1986
1987         default:
1988                 break;
1989         }
1990
1991         return false;
1992 }
1993
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *                            remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver: @ctx->subop.ninstrs instructions, with
 * @ctx->subop.last_instr_end_off giving the split offset inside the last one
 * (0 when it was fully consumed).
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume offset inside the first instruction if it was split before. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The instr_offset value comes back updated to point to the
		 * position where we have to split the instruction (the start
		 * of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* Count the partial instruction and stop here. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2077
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump the full instruction list of @ctx, marking with "->" the instructions
 * that belong to the sub-operation currently being executed. Only compiled in
 * when dynamic debug (or DEBUG) is enabled.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix while inside the current subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("%sCMD      [0x%02x]\n", prefix,
				 instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			/* %*ph dump is capped at 64 address cycles. */
			pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
				 instr->ctx.addr.naddrs,
				 instr->ctx.addr.naddrs < 64 ?
				 instr->ctx.addr.naddrs : 64,
				 instr->ctx.addr.addrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
				 instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");
			break;
		case NAND_OP_WAITRDY_INSTR:
			pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
				 instr->ctx.waitrdy.timeout_ms);
			break;
		}

		/* Past the last subop instruction: back to the plain prefix. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* Stub used when debug tracing is compiled out. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2133
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *              does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume @op chunk by chunk until every instruction is handled. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		int ret;

		/* First matching pattern wins: they are tried in order. */
		for (i = 0; i < parser->npatterns; i++) {
			const struct nand_op_parser_pattern *pattern;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &ctx))
				continue;

			nand_op_parser_trace(&ctx);

			if (check_only)
				break;

			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;

			break;
		}

		/* No pattern matched this chunk: the operation is unsupported. */
		if (i == parser->npatterns) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop. If the last instruction of this subop was split
		 * (last_instr_end_off != 0), it must be re-emitted at the head
		 * of the next subop, resuming at first_instr_start_off.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2208
2209 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2210 {
2211         return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2212                          instr->type == NAND_OP_DATA_OUT_INSTR);
2213 }
2214
2215 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2216                                       unsigned int instr_idx)
2217 {
2218         return subop && instr_idx < subop->ninstrs;
2219 }
2220
2221 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2222                                              unsigned int instr_idx)
2223 {
2224         if (instr_idx)
2225                 return 0;
2226
2227         return subop->first_instr_start_off;
2228 }
2229
2230 /**
2231  * nand_subop_get_addr_start_off - Get the start offset in an address array
2232  * @subop: The entire sub-operation
2233  * @instr_idx: Index of the instruction inside the sub-operation
2234  *
2235  * During driver development, one could be tempted to directly use the
2236  * ->addr.addrs field of address instructions. This is wrong as address
2237  * instructions might be split.
2238  *
2239  * Given an address instruction, returns the offset of the first cycle to issue.
2240  */
2241 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2242                                            unsigned int instr_idx)
2243 {
2244         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2245                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2246                 return 0;
2247
2248         return nand_subop_get_start_off(subop, instr_idx);
2249 }
2250 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2251
2252 /**
2253  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2254  * @subop: The entire sub-operation
2255  * @instr_idx: Index of the instruction inside the sub-operation
2256  *
2257  * During driver development, one could be tempted to directly use the
2258  * ->addr->naddrs field of a data instruction. This is wrong as instructions
2259  * might be split.
2260  *
2261  * Given an address instruction, returns the number of address cycle to issue.
2262  */
2263 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2264                                          unsigned int instr_idx)
2265 {
2266         int start_off, end_off;
2267
2268         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2269                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2270                 return 0;
2271
2272         start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2273
2274         if (instr_idx == subop->ninstrs - 1 &&
2275             subop->last_instr_end_off)
2276                 end_off = subop->last_instr_end_off;
2277         else
2278                 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2279
2280         return end_off - start_off;
2281 }
2282 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2283
2284 /**
2285  * nand_subop_get_data_start_off - Get the start offset in a data array
2286  * @subop: The entire sub-operation
2287  * @instr_idx: Index of the instruction inside the sub-operation
2288  *
2289  * During driver development, one could be tempted to directly use the
2290  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2291  * instructions might be split.
2292  *
2293  * Given a data instruction, returns the offset to start from.
2294  */
2295 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2296                                            unsigned int instr_idx)
2297 {
2298         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2299                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2300                 return 0;
2301
2302         return nand_subop_get_start_off(subop, instr_idx);
2303 }
2304 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2305
2306 /**
2307  * nand_subop_get_data_len - Get the number of bytes to retrieve
2308  * @subop: The entire sub-operation
2309  * @instr_idx: Index of the instruction inside the sub-operation
2310  *
2311  * During driver development, one could be tempted to directly use the
2312  * ->data->len field of a data instruction. This is wrong as data instructions
2313  * might be split.
2314  *
2315  * Returns the length of the chunk of data to send/receive.
2316  */
2317 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2318                                      unsigned int instr_idx)
2319 {
2320         int start_off = 0, end_off;
2321
2322         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2323                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2324                 return 0;
2325
2326         start_off = nand_subop_get_data_start_off(subop, instr_idx);
2327
2328         if (instr_idx == subop->ninstrs - 1 &&
2329             subop->last_instr_end_off)
2330                 end_off = subop->last_instr_end_off;
2331         else
2332                 end_off = subop->instrs[instr_idx].ctx.data.len;
2333
2334         return end_off - start_off;
2335 }
2336 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2337
2338 /**
2339  * nand_reset - Reset and initialize a NAND device
2340  * @chip: The NAND chip
2341  * @chipnr: Internal die id
2342  *
2343  * Save the timings data structure, then apply SDR timings mode 0 (see
2344  * nand_reset_data_interface for details), do the reset operation, and
2345  * apply back the previous timings.
2346  *
2347  * Returns 0 on success, a negative error code otherwise.
2348  */
2349 int nand_reset(struct nand_chip *chip, int chipnr)
2350 {
2351         struct nand_data_interface saved_data_intf = chip->data_interface;
2352         int ret;
2353
2354         ret = nand_reset_data_interface(chip, chipnr);
2355         if (ret)
2356                 return ret;
2357
2358         /*
2359          * The CS line has to be released before we can apply the new NAND
2360          * interface settings, hence this weird nand_select_target()
2361          * nand_deselect_target() dance.
2362          */
2363         nand_select_target(chip, chipnr);
2364         ret = nand_reset_op(chip);
2365         nand_deselect_target(chip);
2366         if (ret)
2367                 return ret;
2368
2369         /*
2370          * A nand_reset_data_interface() put both the NAND chip and the NAND
2371          * controller in timings mode 0. If the default mode for this chip is
2372          * also 0, no need to proceed to the change again. Plus, at probe time,
2373          * nand_setup_data_interface() uses ->set/get_features() which would
2374          * fail anyway as the parameter page is not available yet.
2375          */
2376         if (!chip->onfi_timing_mode_default)
2377                 return 0;
2378
2379         chip->data_interface = saved_data_intf;
2380         ret = nand_setup_data_interface(chip, chipnr);
2381         if (ret)
2382                 return ret;
2383
2384         return 0;
2385 }
2386 EXPORT_SYMBOL_GPL(nand_reset);
2387
2388 /**
2389  * nand_get_features - wrapper to perform a GET_FEATURE
2390  * @chip: NAND chip info structure
2391  * @addr: feature address
2392  * @subfeature_param: the subfeature parameters, a four bytes array
2393  *
2394  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2395  * operation cannot be handled.
2396  */
2397 int nand_get_features(struct nand_chip *chip, int addr,
2398                       u8 *subfeature_param)
2399 {
2400         if (!nand_supports_get_features(chip, addr))
2401                 return -ENOTSUPP;
2402
2403         if (chip->legacy.get_features)
2404                 return chip->legacy.get_features(chip, addr, subfeature_param);
2405
2406         return nand_get_features_op(chip, addr, subfeature_param);
2407 }
2408
2409 /**
2410  * nand_set_features - wrapper to perform a SET_FEATURE
2411  * @chip: NAND chip info structure
2412  * @addr: feature address
2413  * @subfeature_param: the subfeature parameters, a four bytes array
2414  *
2415  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2416  * operation cannot be handled.
2417  */
2418 int nand_set_features(struct nand_chip *chip, int addr,
2419                       u8 *subfeature_param)
2420 {
2421         if (!nand_supports_set_features(chip, addr))
2422                 return -ENOTSUPP;
2423
2424         if (chip->legacy.set_features)
2425                 return chip->legacy.set_features(chip, addr, subfeature_param);
2426
2427         return nand_set_features_op(chip, addr, subfeature_param);
2428 }
2429
/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region is not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that nand_check_erased_buf function exits before
 * testing the whole buffer if the number of bitflips exceeds the
 * bitflips_threshold value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Head: consume bytes one by one until the pointer is long-aligned. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Body: scan a long at a time; fully-erased words are skipped cheaply. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Tail: remaining bytes after the last full long. */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
2483
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *                               0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expect you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	/* Check data, then ECC, then extra OOB — order fixes the budget use. */
	void *bufs[] = { data, ecc, extraoob };
	int lens[] = { datalen, ecclen, extraooblen };
	int flips[3];
	int total = 0;
	int i;

	for (i = 0; i < 3; i++) {
		flips[i] = nand_check_erased_buf(bufs[i], lens[i],
						 bitflips_threshold);
		if (flips[i] < 0)
			return flips[i];

		/* Shrink the remaining budget for the next buffer. */
		bitflips_threshold -= flips[i];
		total += flips[i];
	}

	/* Region considered erased: scrub the flipped buffers back to 0xff. */
	for (i = 0; i < 3; i++) {
		if (flips[i])
			memset(bufs[i], 0xff, lens[i]);
	}

	return total;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2560
2561 /**
2562  * nand_read_page_raw_notsupp - dummy read raw page function
2563  * @chip: nand chip info structure
2564  * @buf: buffer to store read data
2565  * @oob_required: caller requires OOB data read to chip->oob_poi
2566  * @page: page number to read
2567  *
2568  * Returns -ENOTSUPP unconditionally.
2569  */
2570 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2571                                int oob_required, int page)
2572 {
2573         return -ENOTSUPP;
2574 }
2575
2576 /**
2577  * nand_read_page_raw - [INTERN] read raw page data without ecc
2578  * @chip: nand chip info structure
2579  * @buf: buffer to store read data
2580  * @oob_required: caller requires OOB data read to chip->oob_poi
2581  * @page: page number to read
2582  *
2583  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2584  */
2585 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2586                        int page)
2587 {
2588         struct mtd_info *mtd = nand_to_mtd(chip);
2589         int ret;
2590
2591         ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2592         if (ret)
2593                 return ret;
2594
2595         if (oob_required) {
2596                 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2597                                         false);
2598                 if (ret)
2599                         return ret;
2600         }
2601
2602         return 0;
2603 }
2604 EXPORT_SYMBOL(nand_read_page_raw);
2605
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layouts interleave data and OOB on the flash page: each
	 * ECC step is laid out as [data | prepad | ECC | postpad]. Walk the
	 * steps and de-interleave the stream into @buf and chip->oob_poi.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Fetch whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
2669
2670 /**
2671  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2672  * @chip: nand chip info structure
2673  * @buf: buffer to store read data
2674  * @oob_required: caller requires OOB data read to chip->oob_poi
2675  * @page: page number to read
2676  */
2677 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2678                                 int oob_required, int page)
2679 {
2680         struct mtd_info *mtd = nand_to_mtd(chip);
2681         int i, eccsize = chip->ecc.size, ret;
2682         int eccbytes = chip->ecc.bytes;
2683         int eccsteps = chip->ecc.steps;
2684         uint8_t *p = buf;
2685         uint8_t *ecc_calc = chip->ecc.calc_buf;
2686         uint8_t *ecc_code = chip->ecc.code_buf;
2687         unsigned int max_bitflips = 0;
2688
2689         chip->ecc.read_page_raw(chip, buf, 1, page);
2690
2691         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2692                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2693
2694         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2695                                          chip->ecc.total);
2696         if (ret)
2697                 return ret;
2698
2699         eccsteps = chip->ecc.steps;
2700         p = buf;
2701
2702         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2703                 int stat;
2704
2705                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2706                 if (stat < 0) {
2707                         mtd->ecc_stats.failed++;
2708                 } else {
2709                         mtd->ecc_stats.corrected += stat;
2710                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2711                 }
2712         }
2713         return max_bitflips;
2714 }
2715
/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 *
 * Reads only the ECC steps covering [@data_offs, @data_offs + @readlen),
 * plus the matching ECC bytes from the OOB area, and corrects them.
 * Returns the maximum number of bitflips in any step or a negative errno.
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC code within the ECC area. */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: fall back to reading all OOB. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Widen the read by one byte on each misaligned edge. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the on-flash ECC bytes for this fragment out of the OOB. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each ECC step of the fragment and update statistics. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2826
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Read the page chunk by chunk: the ECC engine must be armed
	 * (->hwctl) before each data transfer so it can accumulate the
	 * syndrome, which ->calculate then retrieves into ecc_calc.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Data area done: read the whole OOB, which holds the stored ECC. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	/* Extract the on-flash ECC bytes from the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each chunk against the stored ECC and update stats. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2896
2897 /**
2898  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2899  * @chip: nand chip info structure
2900  * @buf: buffer to store read data
2901  * @oob_required: caller requires OOB data read to chip->oob_poi
2902  * @page: page number to read
2903  *
2904  * Hardware ECC for large page chips, require OOB to be read first. For this
2905  * ECC mode, the write_page method is re-used from ECC_HW. These methods
2906  * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
2907  * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
2908  * the data area, by overwriting the NAND manufacturer bad block markings.
2909  */
2910 static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
2911                                           int oob_required, int page)
2912 {
2913         struct mtd_info *mtd = nand_to_mtd(chip);
2914         int i, eccsize = chip->ecc.size, ret;
2915         int eccbytes = chip->ecc.bytes;
2916         int eccsteps = chip->ecc.steps;
2917         uint8_t *p = buf;
2918         uint8_t *ecc_code = chip->ecc.code_buf;
2919         uint8_t *ecc_calc = chip->ecc.calc_buf;
2920         unsigned int max_bitflips = 0;
2921
2922         /* Read the OOB area first */
2923         ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2924         if (ret)
2925                 return ret;
2926
2927         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2928         if (ret)
2929                 return ret;
2930
2931         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2932                                          chip->ecc.total);
2933         if (ret)
2934                 return ret;
2935
2936         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2937                 int stat;
2938
2939                 chip->ecc.hwctl(chip, NAND_ECC_READ);
2940
2941                 ret = nand_read_data_op(chip, p, eccsize, false);
2942                 if (ret)
2943                         return ret;
2944
2945                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2946
2947                 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
2948                 if (stat == -EBADMSG &&
2949                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2950                         /* check for empty pages with bitflips */
2951                         stat = nand_check_erased_ecc_chunk(p, eccsize,
2952                                                 &ecc_code[i], eccbytes,
2953                                                 NULL, 0,
2954                                                 chip->ecc.strength);
2955                 }
2956
2957                 if (stat < 0) {
2958                         mtd->ecc_stats.failed++;
2959                 } else {
2960                         mtd->ecc_stats.corrected += stat;
2961                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2962                 }
2963         }
2964         return max_bitflips;
2965 }
2966
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Full per-step OOB footprint: prepad + ECC bytes + postpad. */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Data and OOB are interleaved on flash: for each step, arm the ECC
	 * engine, read the data chunk, then the prepad/ECC/postpad bytes
	 * that follow it, correcting as we go.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome readback before the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			/* oob now points past this step: back up eccpadbytes. */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3058
3059 /**
3060  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3061  * @chip: NAND chip object
3062  * @oob: oob destination address
3063  * @ops: oob ops structure
3064  * @len: size of oob to transfer
3065  */
3066 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3067                                   struct mtd_oob_ops *ops, size_t len)
3068 {
3069         struct mtd_info *mtd = nand_to_mtd(chip);
3070         int ret;
3071
3072         switch (ops->mode) {
3073
3074         case MTD_OPS_PLACE_OOB:
3075         case MTD_OPS_RAW:
3076                 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3077                 return oob + len;
3078
3079         case MTD_OPS_AUTO_OOB:
3080                 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3081                                                   ops->ooboffs, len);
3082                 BUG_ON(ret);
3083                 return oob + len;
3084
3085         default:
3086                 BUG();
3087         }
3088         return NULL;
3089 }
3090
3091 /**
3092  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3093  * @chip: NAND chip object
3094  * @retry_mode: the retry mode to use
3095  *
3096  * Some vendors supply a special command to shift the Vt threshold, to be used
3097  * when there are too many bitflips in a page (i.e., ECC error). After setting
3098  * a new threshold, the host should retry reading the page.
3099  */
3100 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3101 {
3102         pr_debug("setting READ RETRY mode %d\n", retry_mode);
3103
3104         if (retry_mode >= chip->read_retries)
3105                 return -EINVAL;
3106
3107         if (!chip->setup_read_retry)
3108                 return -EOPNOTSUPP;
3109
3110         return chip->setup_read_retry(chip, retry_mode);
3111 }
3112
3113 static void nand_wait_readrdy(struct nand_chip *chip)
3114 {
3115         const struct nand_sdr_timings *sdr;
3116
3117         if (!(chip->options & NAND_NEED_READRDY))
3118                 return;
3119
3120         sdr = nand_get_sdr_timings(&chip->data_interface);
3121         WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3122 }
3123
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Resolve @from into a chip number and a page within that chip. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset within the first page (may be unaligned). */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot to detect ECC failures caused by this page only. */
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Decide whether to read through the chip's bounce buffer:
		 * required for partial pages, and for caller buffers that
		 * are unsuitable for DMA when NAND_USE_BOUNCE_BUFFER is set.
		 */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bufpoi ? chip->data_buf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/*
				 * Cache the page only if it was read whole,
				 * cleanly, and with ECC - otherwise the cache
				 * contents would be unreliable.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, chip->data_buf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/*
			 * On an uncorrectable error, step through the chip's
			 * read-retry modes and re-read the page; give up only
			 * once all modes are exhausted.
			 */
			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failures; retry */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page cache hit: serve the data from data_buf. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3298
3299 /**
3300  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3301  * @chip: nand chip info structure
3302  * @page: page number to read
3303  */
3304 int nand_read_oob_std(struct nand_chip *chip, int page)
3305 {
3306         struct mtd_info *mtd = nand_to_mtd(chip);
3307
3308         return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3309 }
3310 EXPORT_SYMBOL(nand_read_oob_std);
3311
3312 /**
3313  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3314  *                          with syndromes
3315  * @chip: nand chip info structure
3316  * @page: page number to read
3317  */
3318 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3319 {
3320         struct mtd_info *mtd = nand_to_mtd(chip);
3321         int length = mtd->oobsize;
3322         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3323         int eccsize = chip->ecc.size;
3324         uint8_t *bufpoi = chip->oob_poi;
3325         int i, toread, sndrnd = 0, pos, ret;
3326
3327         ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3328         if (ret)
3329                 return ret;
3330
3331         for (i = 0; i < chip->ecc.steps; i++) {
3332                 if (sndrnd) {
3333                         int ret;
3334
3335                         pos = eccsize + i * (eccsize + chunk);
3336                         if (mtd->writesize > 512)
3337                                 ret = nand_change_read_column_op(chip, pos,
3338                                                                  NULL, 0,
3339                                                                  false);
3340                         else
3341                                 ret = nand_read_page_op(chip, page, pos, NULL,
3342                                                         0);
3343
3344                         if (ret)
3345                                 return ret;
3346                 } else
3347                         sndrnd = 1;
3348                 toread = min_t(int, length, chunk);
3349
3350                 ret = nand_read_data_op(chip, bufpoi, toread, false);
3351                 if (ret)
3352                         return ret;
3353
3354                 bufpoi += toread;
3355                 length -= toread;
3356         }
3357         if (length > 0) {
3358                 ret = nand_read_data_op(chip, bufpoi, length, false);
3359                 if (ret)
3360                         return ret;
3361         }
3362
3363         return 0;
3364 }
3365
3366 /**
3367  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3368  * @chip: nand chip info structure
3369  * @page: page number to write
3370  */
3371 int nand_write_oob_std(struct nand_chip *chip, int page)
3372 {
3373         struct mtd_info *mtd = nand_to_mtd(chip);
3374
3375         return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3376                                  mtd->oobsize);
3377 }
3378 EXPORT_SYMBOL(nand_write_oob_std);
3379
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *                           with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Returns 0 on success or a negative error code.
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: the whole OOB is contiguous after the data */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot change the write
				 * column: skip the data area by feeding 0xFF
				 * filler (matching the erased state, as done
				 * for untouched bytes elsewhere in this file).
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Jump over the data part of this step */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		/* Write this step's prepad + ECC + postpad group */
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Flush whatever OOB bytes remain after the last chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3452
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 * Returns the maximum bitflip count seen across the pages read, or a
 * negative error code; -EBADMSG if a new uncorrectable ECC error occurred.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats to detect failures added during this read */
	stats = mtd->ecc_stats;

	/* OOB bytes available per page for this mode (auto vs place/raw) */
	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		/* Copy out at most the remaining requested length */
		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		/* A non-negative ret is treated as this page's bitflip count */
		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new uncorrectable ECC failure during this operation? */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3530
3531 /**
3532  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3533  * @mtd: MTD device structure
3534  * @from: offset to read from
3535  * @ops: oob operation description structure
3536  *
3537  * NAND read data and/or out-of-band data.
3538  */
3539 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3540                          struct mtd_oob_ops *ops)
3541 {
3542         struct nand_chip *chip = mtd_to_nand(mtd);
3543         int ret;
3544
3545         ops->retlen = 0;
3546
3547         if (ops->mode != MTD_OPS_PLACE_OOB &&
3548             ops->mode != MTD_OPS_AUTO_OOB &&
3549             ops->mode != MTD_OPS_RAW)
3550                 return -ENOTSUPP;
3551
3552         ret = nand_get_device(chip);
3553         if (ret)
3554                 return ret;
3555
3556         if (!ops->datbuf)
3557                 ret = nand_do_read_oob(chip, from, ops);
3558         else
3559                 ret = nand_do_read_ops(chip, from, ops);
3560
3561         nand_release_device(chip);
3562         return ret;
3563 }
3564
3565 /**
3566  * nand_write_page_raw_notsupp - dummy raw page write function
3567  * @chip: nand chip info structure
3568  * @buf: data buffer
3569  * @oob_required: must write chip->oob_poi to OOB
3570  * @page: page number to write
3571  *
3572  * Returns -ENOTSUPP unconditionally.
3573  */
3574 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3575                                 int oob_required, int page)
3576 {
3577         return -ENOTSUPP;
3578 }
3579
3580 /**
3581  * nand_write_page_raw - [INTERN] raw page write function
3582  * @chip: nand chip info structure
3583  * @buf: data buffer
3584  * @oob_required: must write chip->oob_poi to OOB
3585  * @page: page number to write
3586  *
3587  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3588  */
3589 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3590                         int oob_required, int page)
3591 {
3592         struct mtd_info *mtd = nand_to_mtd(chip);
3593         int ret;
3594
3595         ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3596         if (ret)
3597                 return ret;
3598
3599         if (oob_required) {
3600                 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3601                                          false);
3602                 if (ret)
3603                         return ret;
3604         }
3605
3606         return nand_prog_page_end_op(chip);
3607 }
3608 EXPORT_SYMBOL(nand_write_page_raw);
3609
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 *
 * Note: @oob_required is not consulted here; the prepad/ECC/postpad bytes
 * from chip->oob_poi are always interleaved with the data chunks, matching
 * the syndrome read path. Returns 0 on success or a negative error code.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk for this ECC step */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes come straight from the caller's OOB buffer */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write out any remaining OOB bytes not covered by the layout */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3674 /**
3675  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3676  * @chip: nand chip info structure
3677  * @buf: data buffer
3678  * @oob_required: must write chip->oob_poi to OOB
3679  * @page: page number to write
3680  */
3681 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3682                                  int oob_required, int page)
3683 {
3684         struct mtd_info *mtd = nand_to_mtd(chip);
3685         int i, eccsize = chip->ecc.size, ret;
3686         int eccbytes = chip->ecc.bytes;
3687         int eccsteps = chip->ecc.steps;
3688         uint8_t *ecc_calc = chip->ecc.calc_buf;
3689         const uint8_t *p = buf;
3690
3691         /* Software ECC calculation */
3692         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3693                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3694
3695         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3696                                          chip->ecc.total);
3697         if (ret)
3698                 return ret;
3699
3700         return chip->ecc.write_page_raw(chip, buf, 1, page);
3701 }
3702
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Arms the controller's ECC engine before each data chunk, collects the
 * generated ECC bytes, places them in chip->oob_poi per the OOB layout and
 * writes the whole OOB area. Returns 0 on success or a negative error code.
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine for this chunk */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Collect the ECC bytes the controller generated */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Scatter the ECC bytes into oob_poi per the ECC OOB layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3745
3746
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip:       nand chip info structure
 * @offset:     column address of subpage within the page
 * @data_len:   data length
 * @buf:        data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Writes the whole page but masks the ECC and OOB bytes of the subpages
 * outside [@offset, @offset + @data_len) with 0xFF so only the targeted
 * subpage range is effectively programmed. Returns 0 on success or a
 * negative error code.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	/* First and last ECC steps touched by the requested range */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this includes masked values (0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3815
3816
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 *
 * Each ECC step is written as data, then prepad, then the generated ECC
 * bytes, then postpad, with the OOB pointer advancing through oob_poi.
 * Returns 0 on success or a negative error code.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine for this chunk */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Place the generated ECC bytes at the current OOB position */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3886
3887 /**
3888  * nand_write_page - write one page
3889  * @chip: NAND chip descriptor
3890  * @offset: address offset within the page
3891  * @data_len: length of actual data to be written
3892  * @buf: the data to write
3893  * @oob_required: must write chip->oob_poi to OOB
3894  * @page: page number to write
3895  * @raw: use _raw version of write_page
3896  */
3897 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3898                            int data_len, const uint8_t *buf, int oob_required,
3899                            int page, int raw)
3900 {
3901         struct mtd_info *mtd = nand_to_mtd(chip);
3902         int status, subpage;
3903
3904         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3905                 chip->ecc.write_subpage)
3906                 subpage = offset || (data_len < mtd->writesize);
3907         else
3908                 subpage = 0;
3909
3910         if (unlikely(raw))
3911                 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3912                                                   page);
3913         else if (subpage)
3914                 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3915                                                  oob_required, page);
3916         else
3917                 status = chip->ecc.write_page(chip, buf, oob_required, page);
3918
3919         if (status < 0)
3920                 return status;
3921
3922         return 0;
3923 }
3924
/*
 * NOTALIGNED() - true when @x is not aligned to the chip's subpage size.
 * Expects a 'chip' pointer in scope at the call site; chip->subpagesize
 * must be a power of two. @x is parenthesized so compound expressions
 * (e.g. NOTALIGNED(a + b)) bind correctly against the '&' mask.
 */
#define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
3926
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 * Returns 0 on success or a negative error code; ops->retlen reports how
 * many data bytes were actually written.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Starting column within the first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bufpoi;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Bounce through the internal buffer for partial-page writes,
		 * or when the caller's buffer is unsuitable for DMA.
		 */
		if (part_pagewr)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Partial page write?, or need to use bounce buffer */
		if (use_bufpoi) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* 0xff padding leaves the untouched bytes erased */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages start at column 0 */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4052
4053 /**
4054  * panic_nand_write - [MTD Interface] NAND write with ECC
4055  * @mtd: MTD device structure
4056  * @to: offset to write to
4057  * @len: number of bytes to write
4058  * @retlen: pointer to variable to store the number of written bytes
4059  * @buf: the data to write
4060  *
4061  * NAND write with ECC. Used when performing writes in interrupt context, this
4062  * may for example be called by mtdoops when writing an oops while in panic.
4063  */
4064 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4065                             size_t *retlen, const uint8_t *buf)
4066 {
4067         struct nand_chip *chip = mtd_to_nand(mtd);
4068         int chipnr = (int)(to >> chip->chip_shift);
4069         struct mtd_oob_ops ops;
4070         int ret;
4071
4072         nand_select_target(chip, chipnr);
4073
4074         /* Wait for the device to get ready */
4075         panic_nand_wait(chip, 400);
4076
4077         memset(&ops, 0, sizeof(ops));
4078         ops.len = len;
4079         ops.datbuf = (uint8_t *)buf;
4080         ops.mode = MTD_OPS_PLACE_OOB;
4081
4082         ret = nand_do_write_ops(chip, to, &ops);
4083
4084         *retlen = ops.retlen;
4085         return ret;
4086 }
4087
4088 /**
4089  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4090  * @mtd: MTD device structure
4091  * @to: offset to write to
4092  * @ops: oob operation description structure
4093  */
4094 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4095                           struct mtd_oob_ops *ops)
4096 {
4097         struct nand_chip *chip = mtd_to_nand(mtd);
4098         int ret = -ENOTSUPP;
4099
4100         ops->retlen = 0;
4101
4102         ret = nand_get_device(chip);
4103         if (ret)
4104                 return ret;
4105
4106         switch (ops->mode) {
4107         case MTD_OPS_PLACE_OOB:
4108         case MTD_OPS_AUTO_OOB:
4109         case MTD_OPS_RAW:
4110                 break;
4111
4112         default:
4113                 goto out;
4114         }
4115
4116         if (!ops->datbuf)
4117                 ret = nand_do_write_oob(chip, to, ops);
4118         else
4119                 ret = nand_do_write_ops(chip, to, ops);
4120
4121 out:
4122         nand_release_device(chip);
4123         return ret;
4124 }
4125
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks, never touching the bad-block table area.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return nand_erase_nand(chip, instr, 0);
}
4137
4138 /**
4139  * nand_erase_nand - [INTERN] erase block(s)
4140  * @chip: NAND chip object
4141  * @instr: erase instruction
4142  * @allowbbt: allow erasing the bbt area
4143  *
4144  * Erase one or more blocks.
4145  */
4146 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4147                     int allowbbt)
4148 {
4149         int page, pages_per_block, ret, chipnr;
4150         loff_t len;
4151
4152         pr_debug("%s: start = 0x%012llx, len = %llu\n",
4153                         __func__, (unsigned long long)instr->addr,
4154                         (unsigned long long)instr->len);
4155
4156         if (check_offs_len(chip, instr->addr, instr->len))
4157                 return -EINVAL;
4158
4159         /* Grab the lock and see if the device is available */
4160         ret = nand_get_device(chip);
4161         if (ret)
4162                 return ret;
4163
4164         /* Shift to get first page */
4165         page = (int)(instr->addr >> chip->page_shift);
4166         chipnr = (int)(instr->addr >> chip->chip_shift);
4167
4168         /* Calculate pages in each block */
4169         pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4170
4171         /* Select the NAND device */
4172         nand_select_target(chip, chipnr);
4173
4174         /* Check, if it is write protected */
4175         if (nand_check_wp(chip)) {
4176                 pr_debug("%s: device is write protected!\n",
4177                                 __func__);
4178                 ret = -EIO;
4179                 goto erase_exit;
4180         }
4181
4182         /* Loop through the pages */
4183         len = instr->len;
4184
4185         while (len) {
4186                 /* Check if we have a bad block, we do not erase bad blocks! */
4187                 if (nand_block_checkbad(chip, ((loff_t) page) <<
4188                                         chip->page_shift, allowbbt)) {
4189                         pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4190                                     __func__, page);
4191                         ret = -EIO;
4192                         goto erase_exit;
4193                 }
4194
4195                 /*
4196                  * Invalidate the page cache, if we erase the block which
4197                  * contains the current cached page.
4198                  */
4199                 if (page <= chip->pagecache.page && chip->pagecache.page <
4200                     (page + pages_per_block))
4201                         chip->pagecache.page = -1;
4202
4203                 ret = nand_erase_op(chip, (page & chip->pagemask) >>
4204                                     (chip->phys_erase_shift - chip->page_shift));
4205                 if (ret) {
4206                         pr_debug("%s: failed erase, page 0x%08x\n",
4207                                         __func__, page);
4208                         instr->fail_addr =
4209                                 ((loff_t)page << chip->page_shift);
4210                         goto erase_exit;
4211                 }
4212
4213                 /* Increment page address and decrement length */
4214                 len -= (1ULL << chip->phys_erase_shift);
4215                 page += pages_per_block;
4216
4217                 /* Check, if we cross a chip boundary */
4218                 if (len && !(page & chip->pagemask)) {
4219                         chipnr++;
4220                         nand_deselect_target(chip);
4221                         nand_select_target(chip, chipnr);
4222                 }
4223         }
4224
4225         ret = 0;
4226 erase_exit:
4227
4228         /* Deselect and wake up anyone waiting on the device */
4229         nand_deselect_target(chip);
4230         nand_release_device(chip);
4231
4232         /* Return more or less happy */
4233         return ret;
4234 }
4235
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: acquire the device lock
 * (which blocks until pending operations complete) and release it again.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock to wait for the device, then hand it straight back */
	WARN_ON(nand_get_device(this));
	nand_release_device(this);
}
4253
4254 /**
4255  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4256  * @mtd: MTD device structure
4257  * @offs: offset relative to mtd start
4258  */
4259 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4260 {
4261         struct nand_chip *chip = mtd_to_nand(mtd);
4262         int chipnr = (int)(offs >> chip->chip_shift);
4263         int ret;
4264
4265         /* Select the NAND device */
4266         ret = nand_get_device(chip);
4267         if (ret)
4268                 return ret;
4269
4270         nand_select_target(chip, chipnr);
4271
4272         ret = nand_block_checkbad(chip, offs, 0);
4273
4274         nand_deselect_target(chip);
4275         nand_release_device(chip);
4276
4277         return ret;
4278 }
4279
4280 /**
4281  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4282  * @mtd: MTD device structure
4283  * @ofs: offset relative to mtd start
4284  */
4285 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4286 {
4287         int ret;
4288
4289         ret = nand_block_isbad(mtd, ofs);
4290         if (ret) {
4291                 /* If it was bad already, return success and do nothing */
4292                 if (ret > 0)
4293                         return 0;
4294                 return ret;
4295         }
4296
4297         return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4298 }
4299
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Marks the chip as suspended under chip->lock. Always returns 0.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* chip->lock serializes access to the suspended flag */
	mutex_lock(&chip->lock);
	chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return 0;
}
4314
4315 /**
4316  * nand_resume - [MTD Interface] Resume the NAND flash
4317  * @mtd: MTD device structure
4318  */
4319 static void nand_resume(struct mtd_info *mtd)
4320 {
4321         struct nand_chip *chip = mtd_to_nand(mtd);
4322
4323         mutex_lock(&chip->lock);
4324         if (chip->suspended)
4325                 chip->suspended = 0;
4326         else
4327                 pr_err("%s called for a chip which is not in suspended state\n",
4328                         __func__);
4329         mutex_unlock(&chip->lock);
4330 }
4331
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending blocks further operations; never resumed on shutdown */
	nand_suspend(mtd);
}
4341
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	/* Fill in legacy hook defaults (cmdfunc, read/write byte, ...) */
	nand_legacy_set_defaults(chip);

	/* Buffer alignment of 1 means "no special alignment required" */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4356
4357 /* Sanitize ONFI strings so we can safely print them */
4358 void sanitize_string(uint8_t *s, size_t len)
4359 {
4360         ssize_t i;
4361
4362         /* Null terminate */
4363         s[len - 1] = 0;
4364
4365         /* Remove non printable chars */
4366         for (i = 0; i < len - 1; i++) {
4367                 if (s[i] < ' ' || s[i] > 127)
4368                         s[i] = '?';
4369         }
4370
4371         /* Remove trailing spaces */
4372         strim(s);
4373 }
4374
4375 /*
4376  * nand_id_has_period - Check if an ID string has a given wraparound period
4377  * @id_data: the ID string
4378  * @arrlen: the length of the @id_data array
4379  * @period: the period of repitition
4380  *
4381  * Check if an ID string is repeated within a given sequence of bytes at
4382  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4383  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4384  * if the repetition has a period of @period; otherwise, returns zero.
4385  */
4386 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4387 {
4388         int i, j;
4389         for (i = 0; i < period; i++)
4390                 for (j = i + period; j < arrlen; j += period)
4391                         if (id_data[i] != id_data[j])
4392                                 return 0;
4393         return 1;
4394 }
4395
4396 /*
4397  * nand_id_len - Get the length of an ID string returned by CMD_READID
4398  * @id_data: the ID string
4399  * @arrlen: the length of the @id_data array
4400
4401  * Returns the length of the ID string, according to known wraparound/trailing
4402  * zero patterns. If no pattern exists, returns the length of the array.
4403  */
4404 static int nand_id_len(u8 *id_data, int arrlen)
4405 {
4406         int last_nonzero, period;
4407
4408         /* Find last non-zero byte */
4409         for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4410                 if (id_data[last_nonzero])
4411                         break;
4412
4413         /* All zeros */
4414         if (last_nonzero < 0)
4415                 return 0;
4416
4417         /* Calculate wraparound period */
4418         for (period = 1; period < arrlen; period++)
4419                 if (nand_id_has_period(id_data, arrlen, period))
4420                         break;
4421
4422         /* There's a repeated pattern */
4423         if (period < arrlen)
4424                 return period;
4425
4426         /* There are trailing zeros */
4427         if (last_nonzero < arrlen - 1)
4428                 return last_nonzero + 1;
4429
4430         /* No pattern detected */
4431         return arrlen;
4432 }
4433
4434 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4435 static int nand_get_bits_per_cell(u8 cellinfo)
4436 {
4437         int bits;
4438
4439         bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4440         bits >>= NAND_CI_CELLTYPE_SHIFT;
4441         return bits + 1;
4442 }
4443
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 *
 * Decodes id byte 3 (cell type) and byte 4 (page size, OOB size, block size,
 * bus width) into the chip's memory organization and MTD fields.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: bits [1:0] encode 1KiB << n */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512 bytes of page */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4481
4482 /*
4483  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4484  * decodes a matching ID table entry and assigns the MTD size parameters for
4485  * the chip.
4486  */
4487 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4488 {
4489         struct mtd_info *mtd = nand_to_mtd(chip);
4490         struct nand_memory_organization *memorg;
4491
4492         memorg = nanddev_get_memorg(&chip->base);
4493
4494         memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4495         mtd->erasesize = type->erasesize;
4496         memorg->pagesize = type->pagesize;
4497         mtd->writesize = memorg->pagesize;
4498         memorg->oobsize = memorg->pagesize / 32;
4499         mtd->oobsize = memorg->oobsize;
4500
4501         /* All legacy ID NAND are small-page, SLC */
4502         memorg->bits_per_cell = 1;
4503 }
4504
4505 /*
4506  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4507  * heuristic patterns using various detected parameters (e.g., manufacturer,
4508  * page size, cell-type information).
4509  */
4510 static void nand_decode_bbm_options(struct nand_chip *chip)
4511 {
4512         struct mtd_info *mtd = nand_to_mtd(chip);
4513
4514         /* Set the bad block position */
4515         if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4516                 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4517         else
4518                 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
4519 }
4520
4521 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4522 {
4523         return type->id_len;
4524 }
4525
4526 static bool find_full_id_nand(struct nand_chip *chip,
4527                               struct nand_flash_dev *type)
4528 {
4529         struct mtd_info *mtd = nand_to_mtd(chip);
4530         struct nand_memory_organization *memorg;
4531         u8 *id_data = chip->id.data;
4532
4533         memorg = nanddev_get_memorg(&chip->base);
4534
4535         if (!strncmp(type->id, id_data, type->id_len)) {
4536                 memorg->pagesize = type->pagesize;
4537                 mtd->writesize = memorg->pagesize;
4538                 memorg->pages_per_eraseblock = type->erasesize /
4539                                                type->pagesize;
4540                 mtd->erasesize = type->erasesize;
4541                 memorg->oobsize = type->oobsize;
4542                 mtd->oobsize = memorg->oobsize;
4543
4544                 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4545                 memorg->eraseblocks_per_lun =
4546                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4547                                            memorg->pagesize *
4548                                            memorg->pages_per_eraseblock);
4549                 chip->options |= type->options;
4550                 chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
4551                 chip->base.eccreq.step_size = NAND_ECC_STEP(type);
4552                 chip->onfi_timing_mode_default =
4553                                         type->onfi_timing_mode_default;
4554
4555                 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4556                 if (!chip->parameters.model)
4557                         return false;
4558
4559                 return true;
4560         }
4561         return false;
4562 }
4563
4564 /*
4565  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4566  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4567  * table.
4568  */
4569 static void nand_manufacturer_detect(struct nand_chip *chip)
4570 {
4571         /*
4572          * Try manufacturer detection if available and use
4573          * nand_decode_ext_id() otherwise.
4574          */
4575         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4576             chip->manufacturer.desc->ops->detect) {
4577                 struct nand_memory_organization *memorg;
4578
4579                 memorg = nanddev_get_memorg(&chip->base);
4580
4581                 /* The 3rd id byte holds MLC / multichip data */
4582                 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4583                 chip->manufacturer.desc->ops->detect(chip);
4584         } else {
4585                 nand_decode_ext_id(chip);
4586         }
4587 }
4588
4589 /*
4590  * Manufacturer initialization. This function is called for all NANDs including
4591  * ONFI and JEDEC compliant ones.
4592  * Manufacturer drivers should put all their specific initialization code in
4593  * their ->init() hook.
4594  */
4595 static int nand_manufacturer_init(struct nand_chip *chip)
4596 {
4597         if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4598             !chip->manufacturer.desc->ops->init)
4599                 return 0;
4600
4601         return chip->manufacturer.desc->ops->init(chip);
4602 }
4603
4604 /*
4605  * Manufacturer cleanup. This function is called for all NANDs including
4606  * ONFI and JEDEC compliant ones.
4607  * Manufacturer drivers should put all their specific cleanup code in their
4608  * ->cleanup() hook.
4609  */
4610 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4611 {
4612         /* Release manufacturer private data */
4613         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4614             chip->manufacturer.desc->ops->cleanup)
4615                 chip->manufacturer.desc->ops->cleanup(chip);
4616 }
4617
4618 static const char *
4619 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4620 {
4621         return manufacturer ? manufacturer->name : "Unknown";
4622 }
4623
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 *
 * Returns 0 on success, -ENODEV when no credible device is found, -ENOMEM on
 * allocation failure, -EINVAL on a bus-width mismatch, or the error returned
 * by the underlying reset/readid/ONFI/JEDEC operations.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	/* Trim wraparound/trailing-zero patterns off the raw ID */
	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	/* Fall back to the built-in ID table when none was supplied */
	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* First pass: full-ID entries match everything, legacy entries
	 * match on the device ID byte only. */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/* Legacy entries without a pagesize need further decoding */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32 bits; handle targets of 4GiB and larger */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 64K erase blocks' worth of pages needs a 3rd row byte */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4814
/* Textual names for each nand_ecc_modes_t value, used for DT parsing */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
4823
4824 static int of_get_nand_ecc_mode(struct device_node *np)
4825 {
4826         const char *pm;
4827         int err, i;
4828
4829         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4830         if (err < 0)
4831                 return err;
4832
4833         for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4834                 if (!strcasecmp(pm, nand_ecc_modes[i]))
4835                         return i;
4836
4837         /*
4838          * For backward compatibility we support few obsoleted values that don't
4839          * have their mappings into nand_ecc_modes_t anymore (they were merged
4840          * with other enums).
4841          */
4842         if (!strcasecmp(pm, "soft_bch"))
4843                 return NAND_ECC_SOFT;
4844
4845         return -ENODEV;
4846 }
4847
/* Textual names for each nand_ecc_algo value, used for DT parsing */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};
4853
4854 static int of_get_nand_ecc_algo(struct device_node *np)
4855 {
4856         const char *pm;
4857         int err, i;
4858
4859         err = of_property_read_string(np, "nand-ecc-algo", &pm);
4860         if (!err) {
4861                 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4862                         if (!strcasecmp(pm, nand_ecc_algos[i]))
4863                                 return i;
4864                 return -ENODEV;
4865         }
4866
4867         /*
4868          * For backward compatibility we also read "nand-ecc-mode" checking
4869          * for some obsoleted values that were specifying ECC algorithm.
4870          */
4871         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4872         if (err < 0)
4873                 return err;
4874
4875         if (!strcasecmp(pm, "soft"))
4876                 return NAND_ECC_HAMMING;
4877         else if (!strcasecmp(pm, "soft_bch"))
4878                 return NAND_ECC_BCH;
4879
4880         return -ENODEV;
4881 }
4882
4883 static int of_get_nand_ecc_step_size(struct device_node *np)
4884 {
4885         int ret;
4886         u32 val;
4887
4888         ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4889         return ret ? ret : val;
4890 }
4891
4892 static int of_get_nand_ecc_strength(struct device_node *np)
4893 {
4894         int ret;
4895         u32 val;
4896
4897         ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4898         return ret ? ret : val;
4899 }
4900
4901 static int of_get_nand_bus_width(struct device_node *np)
4902 {
4903         u32 val;
4904
4905         if (of_property_read_u32(np, "nand-bus-width", &val))
4906                 return 8;
4907
4908         switch (val) {
4909         case 8:
4910         case 16:
4911                 return val;
4912         default:
4913                 return -EIO;
4914         }
4915 }
4916
/* Whether DT requests the bad block table to be stored on flash */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4921
/*
 * Parse the chip's device-tree node and apply the described options and ECC
 * settings. Returns 0 (missing node or properties are not errors).
 */
static int nand_dt_init(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	int ecc_mode, ecc_algo, ecc_strength, ecc_step;

	/* No DT node: keep whatever the controller driver configured */
	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	ecc_mode = of_get_nand_ecc_mode(dn);
	ecc_algo = of_get_nand_ecc_algo(dn);
	ecc_strength = of_get_nand_ecc_strength(dn);
	ecc_step = of_get_nand_ecc_step_size(dn);

	/* Only override defaults for properties that were actually present */
	if (ecc_mode >= 0)
		chip->ecc.mode = ecc_mode;

	if (ecc_algo >= 0)
		chip->ecc.algo = ecc_algo;

	if (ecc_strength >= 0)
		chip->ecc.strength = ecc_strength;

	if (ecc_step > 0)
		chip->ecc.size = ecc_step;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		chip->ecc.options |= NAND_ECC_MAXIMIZE;

	return 0;
}
4961
/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This
 * separation prevented dynamic allocations during this phase which was
 * inconvenient and has been banned for the benefit of the
 * ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5053
/*
 * Free the allocations made during identification (model name string and
 * ONFI parameter data). kfree(NULL) is a no-op, so fields that were never
 * allocated are safe to pass here.
 */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
5059
/*
 * nand_set_ecc_soft_ops - populate chip->ecc for software ECC
 * @chip: NAND chip structure
 *
 * Installs the software Hamming or BCH helpers according to ecc->algo and
 * fills in default step size/strength where the board driver left them
 * unset. Returns 0 on success, -EINVAL on an unsupported or inconsistent
 * configuration.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* This helper is only meaningful for software ECC. */
	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Default Hamming geometry: 3 ECC bytes per 256-byte step. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		/* Use the Smart Media ECC byte ordering when configured. */
		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			/*
			 * Each correctable bit costs roughly fls(8 * ecc->size)
			 * ECC bits (the order of the BCH Galois field), so the
			 * achievable strength is the per-step ECC bit budget
			 * divided by that cost.
			 */
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5159
5160 /**
5161  * nand_check_ecc_caps - check the sanity of preset ECC settings
5162  * @chip: nand chip info structure
5163  * @caps: ECC caps info structure
5164  * @oobavail: OOB size that the ECC engine can use
5165  *
5166  * When ECC step size and strength are already set, check if they are supported
5167  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5168  * On success, the calculated ECC bytes is set.
5169  */
5170 static int
5171 nand_check_ecc_caps(struct nand_chip *chip,
5172                     const struct nand_ecc_caps *caps, int oobavail)
5173 {
5174         struct mtd_info *mtd = nand_to_mtd(chip);
5175         const struct nand_ecc_step_info *stepinfo;
5176         int preset_step = chip->ecc.size;
5177         int preset_strength = chip->ecc.strength;
5178         int ecc_bytes, nsteps = mtd->writesize / preset_step;
5179         int i, j;
5180
5181         for (i = 0; i < caps->nstepinfos; i++) {
5182                 stepinfo = &caps->stepinfos[i];
5183
5184                 if (stepinfo->stepsize != preset_step)
5185                         continue;
5186
5187                 for (j = 0; j < stepinfo->nstrengths; j++) {
5188                         if (stepinfo->strengths[j] != preset_strength)
5189                                 continue;
5190
5191                         ecc_bytes = caps->calc_ecc_bytes(preset_step,
5192                                                          preset_strength);
5193                         if (WARN_ON_ONCE(ecc_bytes < 0))
5194                                 return ecc_bytes;
5195
5196                         if (ecc_bytes * nsteps > oobavail) {
5197                                 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5198                                        preset_step, preset_strength);
5199                                 return -ENOSPC;
5200                         }
5201
5202                         chip->ecc.bytes = ecc_bytes;
5203
5204                         return 0;
5205                 }
5206         }
5207
5208         pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5209                preset_step, preset_strength);
5210
5211         return -ENOTSUPP;
5212 }
5213
5214 /**
5215  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5216  * @chip: nand chip info structure
5217  * @caps: ECC engine caps info structure
5218  * @oobavail: OOB size that the ECC engine can use
5219  *
5220  * If a chip's ECC requirement is provided, try to meet it with the least
5221  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5222  * On success, the chosen ECC settings are set.
5223  */
5224 static int
5225 nand_match_ecc_req(struct nand_chip *chip,
5226                    const struct nand_ecc_caps *caps, int oobavail)
5227 {
5228         struct mtd_info *mtd = nand_to_mtd(chip);
5229         const struct nand_ecc_step_info *stepinfo;
5230         int req_step = chip->base.eccreq.step_size;
5231         int req_strength = chip->base.eccreq.strength;
5232         int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5233         int best_step, best_strength, best_ecc_bytes;
5234         int best_ecc_bytes_total = INT_MAX;
5235         int i, j;
5236
5237         /* No information provided by the NAND chip */
5238         if (!req_step || !req_strength)
5239                 return -ENOTSUPP;
5240
5241         /* number of correctable bits the chip requires in a page */
5242         req_corr = mtd->writesize / req_step * req_strength;
5243
5244         for (i = 0; i < caps->nstepinfos; i++) {
5245                 stepinfo = &caps->stepinfos[i];
5246                 step_size = stepinfo->stepsize;
5247
5248                 for (j = 0; j < stepinfo->nstrengths; j++) {
5249                         strength = stepinfo->strengths[j];
5250
5251                         /*
5252                          * If both step size and strength are smaller than the
5253                          * chip's requirement, it is not easy to compare the
5254                          * resulted reliability.
5255                          */
5256                         if (step_size < req_step && strength < req_strength)
5257                                 continue;
5258
5259                         if (mtd->writesize % step_size)
5260                                 continue;
5261
5262                         nsteps = mtd->writesize / step_size;
5263
5264                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5265                         if (WARN_ON_ONCE(ecc_bytes < 0))
5266                                 continue;
5267                         ecc_bytes_total = ecc_bytes * nsteps;
5268
5269                         if (ecc_bytes_total > oobavail ||
5270                             strength * nsteps < req_corr)
5271                                 continue;
5272
5273                         /*
5274                          * We assume the best is to meet the chip's requrement
5275                          * with the least number of ECC bytes.
5276                          */
5277                         if (ecc_bytes_total < best_ecc_bytes_total) {
5278                                 best_ecc_bytes_total = ecc_bytes_total;
5279                                 best_step = step_size;
5280                                 best_strength = strength;
5281                                 best_ecc_bytes = ecc_bytes;
5282                         }
5283                 }
5284         }
5285
5286         if (best_ecc_bytes_total == INT_MAX)
5287                 return -ENOTSUPP;
5288
5289         chip->ecc.size = best_step;
5290         chip->ecc.strength = best_strength;
5291         chip->ecc.bytes = best_ecc_bytes;
5292
5293         return 0;
5294 }
5295
5296 /**
5297  * nand_maximize_ecc - choose the max ECC strength available
5298  * @chip: nand chip info structure
5299  * @caps: ECC engine caps info structure
5300  * @oobavail: OOB size that the ECC engine can use
5301  *
5302  * Choose the max ECC strength that is supported on the controller, and can fit
5303  * within the chip's OOB.  On success, the chosen ECC settings are set.
5304  */
5305 static int
5306 nand_maximize_ecc(struct nand_chip *chip,
5307                   const struct nand_ecc_caps *caps, int oobavail)
5308 {
5309         struct mtd_info *mtd = nand_to_mtd(chip);
5310         const struct nand_ecc_step_info *stepinfo;
5311         int step_size, strength, nsteps, ecc_bytes, corr;
5312         int best_corr = 0;
5313         int best_step = 0;
5314         int best_strength, best_ecc_bytes;
5315         int i, j;
5316
5317         for (i = 0; i < caps->nstepinfos; i++) {
5318                 stepinfo = &caps->stepinfos[i];
5319                 step_size = stepinfo->stepsize;
5320
5321                 /* If chip->ecc.size is already set, respect it */
5322                 if (chip->ecc.size && step_size != chip->ecc.size)
5323                         continue;
5324
5325                 for (j = 0; j < stepinfo->nstrengths; j++) {
5326                         strength = stepinfo->strengths[j];
5327
5328                         if (mtd->writesize % step_size)
5329                                 continue;
5330
5331                         nsteps = mtd->writesize / step_size;
5332
5333                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5334                         if (WARN_ON_ONCE(ecc_bytes < 0))
5335                                 continue;
5336
5337                         if (ecc_bytes * nsteps > oobavail)
5338                                 continue;
5339
5340                         corr = strength * nsteps;
5341
5342                         /*
5343                          * If the number of correctable bits is the same,
5344                          * bigger step_size has more reliability.
5345                          */
5346                         if (corr > best_corr ||
5347                             (corr == best_corr && step_size > best_step)) {
5348                                 best_corr = corr;
5349                                 best_step = step_size;
5350                                 best_strength = strength;
5351                                 best_ecc_bytes = ecc_bytes;
5352                         }
5353                 }
5354         }
5355
5356         if (!best_corr)
5357                 return -ENOTSUPP;
5358
5359         chip->ecc.size = best_step;
5360         chip->ecc.strength = best_strength;
5361         chip->ecc.bytes = best_ecc_bytes;
5362
5363         return 0;
5364 }
5365
5366 /**
5367  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5368  * @chip: nand chip info structure
5369  * @caps: ECC engine caps info structure
5370  * @oobavail: OOB size that the ECC engine can use
5371  *
5372  * Choose the ECC configuration according to following logic
5373  *
5374  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5375  *    then check if it is supported by this controller.
5376  * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5377  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5378  *    to the chip's requirement. If available OOB size can't fit the chip
5379  *    requirement then fallback to the maximum ECC step size and ECC strength.
5380  *
5381  * On success, the chosen ECC settings are set.
5382  */
5383 int nand_ecc_choose_conf(struct nand_chip *chip,
5384                          const struct nand_ecc_caps *caps, int oobavail)
5385 {
5386         struct mtd_info *mtd = nand_to_mtd(chip);
5387
5388         if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5389                 return -EINVAL;
5390
5391         if (chip->ecc.size && chip->ecc.strength)
5392                 return nand_check_ecc_caps(chip, caps, oobavail);
5393
5394         if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5395                 return nand_maximize_ecc(chip, caps, oobavail);
5396
5397         if (!nand_match_ecc_req(chip, caps, oobavail))
5398                 return 0;
5399
5400         return nand_maximize_ecc(chip, caps, oobavail);
5401 }
5402 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5403
5404 /*
5405  * Check if the chip configuration meet the datasheet requirements.
5406
5407  * If our configuration corrects A bits per B bytes and the minimum
5408  * required correction level is X bits per Y bytes, then we must ensure
5409  * both of the following are true:
5410  *
5411  * (1) A / B >= X / Y
5412  * (2) A >= X
5413  *
5414  * Requirement (1) ensures we can correct for the required bitflip density.
5415  * Requirement (2) ensures we can correct even when all bitflips are clumped
5416  * in the same sector.
5417  */
5418 static bool nand_ecc_strength_good(struct nand_chip *chip)
5419 {
5420         struct mtd_info *mtd = nand_to_mtd(chip);
5421         struct nand_ecc_ctrl *ecc = &chip->ecc;
5422         int corr, ds_corr;
5423
5424         if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
5425                 /* Not enough information */
5426                 return true;
5427
5428         /*
5429          * We get the number of corrected bits per page to compare
5430          * the correction density.
5431          */
5432         corr = (mtd->writesize * ecc->strength) / ecc->size;
5433         ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
5434                   chip->base.eccreq.step_size;
5435
5436         return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
5437 }
5438
5439 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5440 {
5441         struct nand_chip *chip = container_of(nand, struct nand_chip,
5442                                               base);
5443         unsigned int eb = nanddev_pos_to_row(nand, pos);
5444         int ret;
5445
5446         eb >>= nand->rowconv.eraseblock_addr_shift;
5447
5448         nand_select_target(chip, pos->target);
5449         ret = nand_erase_op(chip, eb);
5450         nand_deselect_target(chip);
5451
5452         return ret;
5453 }
5454
5455 static int rawnand_markbad(struct nand_device *nand,
5456                            const struct nand_pos *pos)
5457 {
5458         struct nand_chip *chip = container_of(nand, struct nand_chip,
5459                                               base);
5460
5461         return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5462 }
5463
5464 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5465 {
5466         struct nand_chip *chip = container_of(nand, struct nand_chip,
5467                                               base);
5468         int ret;
5469
5470         nand_select_target(chip, pos->target);
5471         ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5472         nand_deselect_target(chip);
5473
5474         return ret;
5475 }
5476
/* Generic NAND framework hooks, backed by the raw NAND helpers above. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5482
5483 /**
5484  * nand_scan_tail - Scan for the NAND device
5485  * @chip: NAND chip object
5486  *
5487  * This is the second phase of the normal nand_scan() function. It fills out
5488  * all the uninitialized function pointers with the defaults and scans for a
5489  * bad block table if appropriate.
5490  */
5491 static int nand_scan_tail(struct nand_chip *chip)
5492 {
5493         struct mtd_info *mtd = nand_to_mtd(chip);
5494         struct nand_ecc_ctrl *ecc = &chip->ecc;
5495         int ret, i;
5496
5497         /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5498         if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5499                    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5500                 return -EINVAL;
5501         }
5502
5503         chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5504         if (!chip->data_buf)
5505                 return -ENOMEM;
5506
5507         /*
5508          * FIXME: some NAND manufacturer drivers expect the first die to be
5509          * selected when manufacturer->init() is called. They should be fixed
5510          * to explictly select the relevant die when interacting with the NAND
5511          * chip.
5512          */
5513         nand_select_target(chip, 0);
5514         ret = nand_manufacturer_init(chip);
5515         nand_deselect_target(chip);
5516         if (ret)
5517                 goto err_free_buf;
5518
5519         /* Set the internal oob buffer location, just after the page data */
5520         chip->oob_poi = chip->data_buf + mtd->writesize;
5521
5522         /*
5523          * If no default placement scheme is given, select an appropriate one.
5524          */
5525         if (!mtd->ooblayout &&
5526             !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5527                 switch (mtd->oobsize) {
5528                 case 8:
5529                 case 16:
5530                         mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5531                         break;
5532                 case 64:
5533                 case 128:
5534                         mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5535                         break;
5536                 default:
5537                         /*
5538                          * Expose the whole OOB area to users if ECC_NONE
5539                          * is passed. We could do that for all kind of
5540                          * ->oobsize, but we must keep the old large/small
5541                          * page with ECC layout when ->oobsize <= 128 for
5542                          * compatibility reasons.
5543                          */
5544                         if (ecc->mode == NAND_ECC_NONE) {
5545                                 mtd_set_ooblayout(mtd,
5546                                                 &nand_ooblayout_lp_ops);
5547                                 break;
5548                         }
5549
5550                         WARN(1, "No oob scheme defined for oobsize %d\n",
5551                                 mtd->oobsize);
5552                         ret = -EINVAL;
5553                         goto err_nand_manuf_cleanup;
5554                 }
5555         }
5556
5557         /*
5558          * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5559          * selected and we have 256 byte pagesize fallback to software ECC
5560          */
5561
5562         switch (ecc->mode) {
5563         case NAND_ECC_HW_OOB_FIRST:
5564                 /* Similar to NAND_ECC_HW, but a separate read_page handle */
5565                 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5566                         WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5567                         ret = -EINVAL;
5568                         goto err_nand_manuf_cleanup;
5569                 }
5570                 if (!ecc->read_page)
5571                         ecc->read_page = nand_read_page_hwecc_oob_first;
5572                 /* fall through */
5573
5574         case NAND_ECC_HW:
5575                 /* Use standard hwecc read page function? */
5576                 if (!ecc->read_page)
5577                         ecc->read_page = nand_read_page_hwecc;
5578                 if (!ecc->write_page)
5579                         ecc->write_page = nand_write_page_hwecc;
5580                 if (!ecc->read_page_raw)
5581                         ecc->read_page_raw = nand_read_page_raw;
5582                 if (!ecc->write_page_raw)
5583                         ecc->write_page_raw = nand_write_page_raw;
5584                 if (!ecc->read_oob)
5585                         ecc->read_oob = nand_read_oob_std;
5586                 if (!ecc->write_oob)
5587                         ecc->write_oob = nand_write_oob_std;
5588                 if (!ecc->read_subpage)
5589                         ecc->read_subpage = nand_read_subpage;
5590                 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5591                         ecc->write_subpage = nand_write_subpage_hwecc;
5592                 /* fall through */
5593
5594         case NAND_ECC_HW_SYNDROME:
5595                 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5596                     (!ecc->read_page ||
5597                      ecc->read_page == nand_read_page_hwecc ||
5598                      !ecc->write_page ||
5599                      ecc->write_page == nand_write_page_hwecc)) {
5600                         WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5601                         ret = -EINVAL;
5602                         goto err_nand_manuf_cleanup;
5603                 }
5604                 /* Use standard syndrome read/write page function? */
5605                 if (!ecc->read_page)
5606                         ecc->read_page = nand_read_page_syndrome;
5607                 if (!ecc->write_page)
5608                         ecc->write_page = nand_write_page_syndrome;
5609                 if (!ecc->read_page_raw)
5610                         ecc->read_page_raw = nand_read_page_raw_syndrome;
5611                 if (!ecc->write_page_raw)
5612                         ecc->write_page_raw = nand_write_page_raw_syndrome;
5613                 if (!ecc->read_oob)
5614                         ecc->read_oob = nand_read_oob_syndrome;
5615                 if (!ecc->write_oob)
5616                         ecc->write_oob = nand_write_oob_syndrome;
5617
5618                 if (mtd->writesize >= ecc->size) {
5619                         if (!ecc->strength) {
5620                                 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5621                                 ret = -EINVAL;
5622                                 goto err_nand_manuf_cleanup;
5623                         }
5624                         break;
5625                 }
5626                 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5627                         ecc->size, mtd->writesize);
5628                 ecc->mode = NAND_ECC_SOFT;
5629                 ecc->algo = NAND_ECC_HAMMING;
5630                 /* fall through */
5631
5632         case NAND_ECC_SOFT:
5633                 ret = nand_set_ecc_soft_ops(chip);
5634                 if (ret) {
5635                         ret = -EINVAL;
5636                         goto err_nand_manuf_cleanup;
5637                 }
5638                 break;
5639
5640         case NAND_ECC_ON_DIE:
5641                 if (!ecc->read_page || !ecc->write_page) {
5642                         WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5643                         ret = -EINVAL;
5644                         goto err_nand_manuf_cleanup;
5645                 }
5646                 if (!ecc->read_oob)
5647                         ecc->read_oob = nand_read_oob_std;
5648                 if (!ecc->write_oob)
5649                         ecc->write_oob = nand_write_oob_std;
5650                 break;
5651
5652         case NAND_ECC_NONE:
5653                 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5654                 ecc->read_page = nand_read_page_raw;
5655                 ecc->write_page = nand_write_page_raw;
5656                 ecc->read_oob = nand_read_oob_std;
5657                 ecc->read_page_raw = nand_read_page_raw;
5658                 ecc->write_page_raw = nand_write_page_raw;
5659                 ecc->write_oob = nand_write_oob_std;
5660                 ecc->size = mtd->writesize;
5661                 ecc->bytes = 0;
5662                 ecc->strength = 0;
5663                 break;
5664
5665         default:
5666                 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5667                 ret = -EINVAL;
5668                 goto err_nand_manuf_cleanup;
5669         }
5670
5671         if (ecc->correct || ecc->calculate) {
5672                 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5673                 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5674                 if (!ecc->calc_buf || !ecc->code_buf) {
5675                         ret = -ENOMEM;
5676                         goto err_nand_manuf_cleanup;
5677                 }
5678         }
5679
5680         /* For many systems, the standard OOB write also works for raw */
5681         if (!ecc->read_oob_raw)
5682                 ecc->read_oob_raw = ecc->read_oob;
5683         if (!ecc->write_oob_raw)
5684                 ecc->write_oob_raw = ecc->write_oob;
5685
5686         /* propagate ecc info to mtd_info */
5687         mtd->ecc_strength = ecc->strength;
5688         mtd->ecc_step_size = ecc->size;
5689
5690         /*
5691          * Set the number of read / write steps for one page depending on ECC
5692          * mode.
5693          */
5694         ecc->steps = mtd->writesize / ecc->size;
5695         if (ecc->steps * ecc->size != mtd->writesize) {
5696                 WARN(1, "Invalid ECC parameters\n");
5697                 ret = -EINVAL;
5698                 goto err_nand_manuf_cleanup;
5699         }
5700         ecc->total = ecc->steps * ecc->bytes;
5701         if (ecc->total > mtd->oobsize) {
5702                 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5703                 ret = -EINVAL;
5704                 goto err_nand_manuf_cleanup;
5705         }
5706
5707         /*
5708          * The number of bytes available for a client to place data into
5709          * the out of band area.
5710          */
5711         ret = mtd_ooblayout_count_freebytes(mtd);
5712         if (ret < 0)
5713                 ret = 0;
5714
5715         mtd->oobavail = ret;
5716
5717         /* ECC sanity check: warn if it's too weak */
5718         if (!nand_ecc_strength_good(chip))
5719                 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5720                         mtd->name);
5721
5722         /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5723         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5724                 switch (ecc->steps) {
5725                 case 2:
5726                         mtd->subpage_sft = 1;
5727                         break;
5728                 case 4:
5729                 case 8:
5730                 case 16:
5731                         mtd->subpage_sft = 2;
5732                         break;
5733                 }
5734         }
5735         chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5736
5737         /* Invalidate the pagebuffer reference */
5738         chip->pagecache.page = -1;
5739
5740         /* Large page NAND with SOFT_ECC should support subpage reads */
5741         switch (ecc->mode) {
5742         case NAND_ECC_SOFT:
5743                 if (chip->page_shift > 9)
5744                         chip->options |= NAND_SUBPAGE_READ;
5745                 break;
5746
5747         default:
5748                 break;
5749         }
5750
5751         ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5752         if (ret)
5753                 goto err_nand_manuf_cleanup;
5754
5755         /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5756         if (chip->options & NAND_ROM)
5757                 mtd->flags = MTD_CAP_ROM;
5758
5759         /* Fill in remaining MTD driver data */
5760         mtd->_erase = nand_erase;
5761         mtd->_point = NULL;
5762         mtd->_unpoint = NULL;
5763         mtd->_panic_write = panic_nand_write;
5764         mtd->_read_oob = nand_read_oob;
5765         mtd->_write_oob = nand_write_oob;
5766         mtd->_sync = nand_sync;
5767         mtd->_lock = NULL;
5768         mtd->_unlock = NULL;
5769         mtd->_suspend = nand_suspend;
5770         mtd->_resume = nand_resume;
5771         mtd->_reboot = nand_shutdown;
5772         mtd->_block_isreserved = nand_block_isreserved;
5773         mtd->_block_isbad = nand_block_isbad;
5774         mtd->_block_markbad = nand_block_markbad;
5775         mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5776
5777         /*
5778          * Initialize bitflip_threshold to its default prior scan_bbt() call.
5779          * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5780          * properly set.
5781          */
5782         if (!mtd->bitflip_threshold)
5783                 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5784
5785         /* Initialize the ->data_interface field. */
5786         ret = nand_init_data_interface(chip);
5787         if (ret)
5788                 goto err_nanddev_cleanup;
5789
5790         /* Enter fastest possible mode on all dies. */
5791         for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5792                 ret = nand_setup_data_interface(chip, i);
5793                 if (ret)
5794                         goto err_nanddev_cleanup;
5795         }
5796
5797         /* Check, if we should skip the bad block table scan */
5798         if (chip->options & NAND_SKIP_BBTSCAN)
5799                 return 0;
5800
5801         /* Build bad block table */
5802         ret = nand_create_bbt(chip);
5803         if (ret)
5804                 goto err_nanddev_cleanup;
5805
5806         return 0;
5807
5808
5809 err_nanddev_cleanup:
5810         nanddev_cleanup(&chip->base);
5811
5812 err_nand_manuf_cleanup:
5813         nand_manufacturer_cleanup(chip);
5814
5815 err_free_buf:
5816         kfree(chip->data_buf);
5817         kfree(ecc->code_buf);
5818         kfree(ecc->calc_buf);
5819
5820         return ret;
5821 }
5822
5823 static int nand_attach(struct nand_chip *chip)
5824 {
5825         if (chip->controller->ops && chip->controller->ops->attach_chip)
5826                 return chip->controller->ops->attach_chip(chip);
5827
5828         return 0;
5829 }
5830
5831 static void nand_detach(struct nand_chip *chip)
5832 {
5833         if (chip->controller->ops && chip->controller->ops->detach_chip)
5834                 chip->controller->ops->detach_chip(chip);
5835 }
5836
5837 /**
5838  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5839  * @chip: NAND chip object
5840  * @maxchips: number of chips to scan for.
5841  * @ids: optional flash IDs table
5842  *
5843  * This fills out all the uninitialized function pointers with the defaults.
5844  * The flash ID is read and the mtd/chip structures are filled with the
5845  * appropriate values.
5846  */
5847 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5848                        struct nand_flash_dev *ids)
5849 {
5850         int ret;
5851
5852         if (!maxchips)
5853                 return -EINVAL;
5854
5855         ret = nand_scan_ident(chip, maxchips, ids);
5856         if (ret)
5857                 return ret;
5858
5859         ret = nand_attach(chip);
5860         if (ret)
5861                 goto cleanup_ident;
5862
5863         ret = nand_scan_tail(chip);
5864         if (ret)
5865                 goto detach_chip;
5866
5867         return 0;
5868
5869 detach_chip:
5870         nand_detach(chip);
5871 cleanup_ident:
5872         nand_scan_ident_cleanup(chip);
5873
5874         return ret;
5875 }
5876 EXPORT_SYMBOL(nand_scan_with_ids);
5877
5878 /**
5879  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5880  * @chip: NAND chip object
5881  */
5882 void nand_cleanup(struct nand_chip *chip)
5883 {
5884         if (chip->ecc.mode == NAND_ECC_SOFT &&
5885             chip->ecc.algo == NAND_ECC_BCH)
5886                 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5887
5888         /* Free bad block table memory */
5889         kfree(chip->bbt);
5890         kfree(chip->data_buf);
5891         kfree(chip->ecc.code_buf);
5892         kfree(chip->ecc.calc_buf);
5893
5894         /* Free bad block descriptor memory */
5895         if (chip->badblock_pattern && chip->badblock_pattern->options
5896                         & NAND_BBT_DYNAMICSTRUCT)
5897                 kfree(chip->badblock_pattern);
5898
5899         /* Free manufacturer priv data. */
5900         nand_manufacturer_cleanup(chip);
5901
5902         /* Free controller specific allocations after chip identification */
5903         nand_detach(chip);
5904
5905         /* Free identification phase allocations */
5906         nand_scan_ident_cleanup(chip);
5907 }
5908
5909 EXPORT_SYMBOL_GPL(nand_cleanup);
5910
5911 /**
5912  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5913  *                held by the NAND device
5914  * @chip: NAND chip object
5915  */
5916 void nand_release(struct nand_chip *chip)
5917 {
5918         mtd_device_unregister(nand_to_mtd(chip));
5919         nand_cleanup(chip);
5920 }
5921 EXPORT_SYMBOL_GPL(nand_release);
5922
/* Module metadata for the generic raw NAND core. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");