// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
					   const struct nand_page_io_req *req,
					   u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the bits selected by @mask in the configuration register with the
 * value in @val, going through the per-target configuration cache.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

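/*
 * Illustrative sketch (not part of the driver): callers clear or set
 * feature bits through the mask/val pair, e.g. clearing the OTP bit
 * as spinand_init() does below:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
 *
 * Thanks to the per-target config cache, the SET FEATURE operation is
 * only issued when the register content actually changes.
 */
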
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

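/*
 * Illustrative sketch (not part of the driver): operations that must touch
 * every die select each target in turn, mirroring what spinand_init() does
 * later in this file when unlocking blocks:
 *
 *	for (i = 0; i < nand->memorg.ntargets; i++) {
 *		ret = spinand_select_target(spinand, i);
 *		if (ret)
 *			return ret;
 *
 *		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
 *		if (ret)
 *			return ret;
 *	}
 */
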
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	void *buf = spinand->databuf;
	unsigned int nbytes;
	u16 column = 0;
	int ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);
	adjreq.dataoffs = 0;
	adjreq.datalen = nanddev_page_size(nand);
	adjreq.databuf.out = spinand->databuf;
	adjreq.ooblen = nanddev_per_page_oobsize(nand);
	adjreq.ooboffs = 0;
	adjreq.oobbuf.out = spinand->oobbuf;

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation might
		 * reset the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

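/*
 * Illustrative sketch (not part of the driver): callers pair an operation
 * with spinand_wait() and then inspect the returned status byte for the
 * matching failure bit, as spinand_erase() does further down:
 *
 *	ret = spinand_erase_op(spinand, pos);
 *	if (ret)
 *		return ret;
 *
 *	ret = spinand_wait(spinand, &status);
 *	if (!ret && (status & STATUS_ERASE_FAILED))
 *		ret = -EIO;
 */
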
static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
	};

	memset(spinand->oobbuf, 0, 2);
	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	spinand_erase_op(spinand, pos);

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

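/*
 * Illustrative sketch (not part of the driver): manufacturer drivers pass
 * tables built with the SPINAND_OP_VARIANTS() macro, ordered from the
 * fastest variant to the slowest, and this helper picks the first one the
 * controller can handle, e.g.:
 *
 *	static SPINAND_OP_VARIANTS(read_cache_variants,
 *			SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
 */
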
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID retrieved with the READ_ID command
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

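/*
 * Illustrative sketch (hypothetical manufacturer driver; the foo_* names
 * are made up): a ->detect() hook checks the manufacturer ID byte and
 * delegates the rest to spinand_match_and_init():
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != FOO_SPINAND_MFR_ID)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */
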
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole oob
	 * area be available for the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");