1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6
7 #include <linux/kernel.h>
8 #include <linux/device.h>
9 #include <linux/init.h>
10 #include <linux/cache.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/mutex.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/clk/clk-conf.h>
17 #include <linux/slab.h>
18 #include <linux/mod_devicetable.h>
19 #include <linux/spi/spi.h>
20 #include <linux/spi/spi-mem.h>
21 #include <linux/of_gpio.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm_domain.h>
25 #include <linux/property.h>
26 #include <linux/export.h>
27 #include <linux/sched/rt.h>
28 #include <uapi/linux/sched/types.h>
29 #include <linux/delay.h>
30 #include <linux/kthread.h>
31 #include <linux/ioport.h>
32 #include <linux/acpi.h>
33 #include <linux/highmem.h>
34 #include <linux/idr.h>
35 #include <linux/platform_data/x86/apple.h>
36
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/spi.h>
39 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
41
42 #include "internals.h"
43
44 static DEFINE_IDR(spi_master_idr);
45
46 static void spidev_release(struct device *dev)
47 {
48         struct spi_device       *spi = to_spi_device(dev);
49
50         /* spi controllers may clean up for released devices */
51         if (spi->controller->cleanup)
52                 spi->controller->cleanup(spi);
53
54         spi_controller_put(spi->controller);
55         kfree(spi->driver_override);
56         kfree(spi);
57 }
58
59 static ssize_t
60 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
61 {
62         const struct spi_device *spi = to_spi_device(dev);
63         int len;
64
65         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
66         if (len != -ENODEV)
67                 return len;
68
69         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
70 }
71 static DEVICE_ATTR_RO(modalias);
72
73 static ssize_t driver_override_store(struct device *dev,
74                                      struct device_attribute *a,
75                                      const char *buf, size_t count)
76 {
77         struct spi_device *spi = to_spi_device(dev);
78         const char *end = memchr(buf, '\n', count);
79         const size_t len = end ? end - buf : count;
80         const char *driver_override, *old;
81
82         /* We need to keep extra room for a newline when displaying the value */
83         if (len >= (PAGE_SIZE - 1))
84                 return -EINVAL;
85
86         driver_override = kstrndup(buf, len, GFP_KERNEL);
87         if (!driver_override)
88                 return -ENOMEM;
89
90         device_lock(dev);
91         old = spi->driver_override;
92         if (len) {
93                 spi->driver_override = driver_override;
94         } else {
95                 /* Empty string, disable driver override */
96                 spi->driver_override = NULL;
97                 kfree(driver_override);
98         }
99         device_unlock(dev);
100         kfree(old);
101
102         return count;
103 }
104
105 static ssize_t driver_override_show(struct device *dev,
106                                     struct device_attribute *a, char *buf)
107 {
108         const struct spi_device *spi = to_spi_device(dev);
109         ssize_t len;
110
111         device_lock(dev);
112         len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
113         device_unlock(dev);
114         return len;
115 }
116 static DEVICE_ATTR_RW(driver_override);
117
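/*
 * Illustrative userspace usage of the driver_override attribute above
 * (the device and driver names here are hypothetical examples):
 *
 *	echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo spi0.0 > /sys/bus/spi/drivers/spidev/bind
 */
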
118 #define SPI_STATISTICS_ATTRS(field, file)                               \
119 static ssize_t spi_controller_##field##_show(struct device *dev,        \
120                                              struct device_attribute *attr, \
121                                              char *buf)                 \
122 {                                                                       \
123         struct spi_controller *ctlr = container_of(dev,                 \
124                                          struct spi_controller, dev);   \
125         return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
126 }                                                                       \
127 static struct device_attribute dev_attr_spi_controller_##field = {      \
128         .attr = { .name = file, .mode = 0444 },                         \
129         .show = spi_controller_##field##_show,                          \
130 };                                                                      \
131 static ssize_t spi_device_##field##_show(struct device *dev,            \
132                                          struct device_attribute *attr, \
133                                         char *buf)                      \
134 {                                                                       \
135         struct spi_device *spi = to_spi_device(dev);                    \
136         return spi_statistics_##field##_show(&spi->statistics, buf);    \
137 }                                                                       \
138 static struct device_attribute dev_attr_spi_device_##field = {          \
139         .attr = { .name = file, .mode = 0444 },                         \
140         .show = spi_device_##field##_show,                              \
141 }
142
143 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
144 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
145                                             char *buf)                  \
146 {                                                                       \
147         unsigned long flags;                                            \
148         ssize_t len;                                                    \
149         spin_lock_irqsave(&stat->lock, flags);                          \
150         len = sprintf(buf, format_string, stat->field);                 \
151         spin_unlock_irqrestore(&stat->lock, flags);                     \
152         return len;                                                     \
153 }                                                                       \
154 SPI_STATISTICS_ATTRS(name, file)
155
156 #define SPI_STATISTICS_SHOW(field, format_string)                       \
157         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
158                                  field, format_string)
159
160 SPI_STATISTICS_SHOW(messages, "%lu");
161 SPI_STATISTICS_SHOW(transfers, "%lu");
162 SPI_STATISTICS_SHOW(errors, "%lu");
163 SPI_STATISTICS_SHOW(timedout, "%lu");
164
165 SPI_STATISTICS_SHOW(spi_sync, "%lu");
166 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
167 SPI_STATISTICS_SHOW(spi_async, "%lu");
168
169 SPI_STATISTICS_SHOW(bytes, "%llu");
170 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
171 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
172
173 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
174         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
175                                  "transfer_bytes_histo_" number,        \
176                                  transfer_bytes_histo[index],  "%lu")
177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
190 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
191 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
192 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
193 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
194
195 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
196
197 static struct attribute *spi_dev_attrs[] = {
198         &dev_attr_modalias.attr,
199         &dev_attr_driver_override.attr,
200         NULL,
201 };
202
203 static const struct attribute_group spi_dev_group = {
204         .attrs  = spi_dev_attrs,
205 };
206
207 static struct attribute *spi_device_statistics_attrs[] = {
208         &dev_attr_spi_device_messages.attr,
209         &dev_attr_spi_device_transfers.attr,
210         &dev_attr_spi_device_errors.attr,
211         &dev_attr_spi_device_timedout.attr,
212         &dev_attr_spi_device_spi_sync.attr,
213         &dev_attr_spi_device_spi_sync_immediate.attr,
214         &dev_attr_spi_device_spi_async.attr,
215         &dev_attr_spi_device_bytes.attr,
216         &dev_attr_spi_device_bytes_rx.attr,
217         &dev_attr_spi_device_bytes_tx.attr,
218         &dev_attr_spi_device_transfer_bytes_histo0.attr,
219         &dev_attr_spi_device_transfer_bytes_histo1.attr,
220         &dev_attr_spi_device_transfer_bytes_histo2.attr,
221         &dev_attr_spi_device_transfer_bytes_histo3.attr,
222         &dev_attr_spi_device_transfer_bytes_histo4.attr,
223         &dev_attr_spi_device_transfer_bytes_histo5.attr,
224         &dev_attr_spi_device_transfer_bytes_histo6.attr,
225         &dev_attr_spi_device_transfer_bytes_histo7.attr,
226         &dev_attr_spi_device_transfer_bytes_histo8.attr,
227         &dev_attr_spi_device_transfer_bytes_histo9.attr,
228         &dev_attr_spi_device_transfer_bytes_histo10.attr,
229         &dev_attr_spi_device_transfer_bytes_histo11.attr,
230         &dev_attr_spi_device_transfer_bytes_histo12.attr,
231         &dev_attr_spi_device_transfer_bytes_histo13.attr,
232         &dev_attr_spi_device_transfer_bytes_histo14.attr,
233         &dev_attr_spi_device_transfer_bytes_histo15.attr,
234         &dev_attr_spi_device_transfer_bytes_histo16.attr,
235         &dev_attr_spi_device_transfers_split_maxsize.attr,
236         NULL,
237 };
238
239 static const struct attribute_group spi_device_statistics_group = {
240         .name  = "statistics",
241         .attrs  = spi_device_statistics_attrs,
242 };
243
244 static const struct attribute_group *spi_dev_groups[] = {
245         &spi_dev_group,
246         &spi_device_statistics_group,
247         NULL,
248 };
249
250 static struct attribute *spi_controller_statistics_attrs[] = {
251         &dev_attr_spi_controller_messages.attr,
252         &dev_attr_spi_controller_transfers.attr,
253         &dev_attr_spi_controller_errors.attr,
254         &dev_attr_spi_controller_timedout.attr,
255         &dev_attr_spi_controller_spi_sync.attr,
256         &dev_attr_spi_controller_spi_sync_immediate.attr,
257         &dev_attr_spi_controller_spi_async.attr,
258         &dev_attr_spi_controller_bytes.attr,
259         &dev_attr_spi_controller_bytes_rx.attr,
260         &dev_attr_spi_controller_bytes_tx.attr,
261         &dev_attr_spi_controller_transfer_bytes_histo0.attr,
262         &dev_attr_spi_controller_transfer_bytes_histo1.attr,
263         &dev_attr_spi_controller_transfer_bytes_histo2.attr,
264         &dev_attr_spi_controller_transfer_bytes_histo3.attr,
265         &dev_attr_spi_controller_transfer_bytes_histo4.attr,
266         &dev_attr_spi_controller_transfer_bytes_histo5.attr,
267         &dev_attr_spi_controller_transfer_bytes_histo6.attr,
268         &dev_attr_spi_controller_transfer_bytes_histo7.attr,
269         &dev_attr_spi_controller_transfer_bytes_histo8.attr,
270         &dev_attr_spi_controller_transfer_bytes_histo9.attr,
271         &dev_attr_spi_controller_transfer_bytes_histo10.attr,
272         &dev_attr_spi_controller_transfer_bytes_histo11.attr,
273         &dev_attr_spi_controller_transfer_bytes_histo12.attr,
274         &dev_attr_spi_controller_transfer_bytes_histo13.attr,
275         &dev_attr_spi_controller_transfer_bytes_histo14.attr,
276         &dev_attr_spi_controller_transfer_bytes_histo15.attr,
277         &dev_attr_spi_controller_transfer_bytes_histo16.attr,
278         &dev_attr_spi_controller_transfers_split_maxsize.attr,
279         NULL,
280 };
281
282 static const struct attribute_group spi_controller_statistics_group = {
283         .name  = "statistics",
284         .attrs  = spi_controller_statistics_attrs,
285 };
286
287 static const struct attribute_group *spi_master_groups[] = {
288         &spi_controller_statistics_group,
289         NULL,
290 };
291
292 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
293                                        struct spi_transfer *xfer,
294                                        struct spi_controller *ctlr)
295 {
296         unsigned long flags;
297         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
298
299         if (l2len < 0)
300                 l2len = 0;
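        /*
         * Worked example (illustrative): a 100-byte transfer has
         * fls(100) = 7, so l2len = 6 and it is counted in the
         * "transfer_bytes_histo_64-127" bucket.
         */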
301
302         spin_lock_irqsave(&stats->lock, flags);
303
304         stats->transfers++;
305         stats->transfer_bytes_histo[l2len]++;
306
307         stats->bytes += xfer->len;
308         if ((xfer->tx_buf) &&
309             (xfer->tx_buf != ctlr->dummy_tx))
310                 stats->bytes_tx += xfer->len;
311         if ((xfer->rx_buf) &&
312             (xfer->rx_buf != ctlr->dummy_rx))
313                 stats->bytes_rx += xfer->len;
314
315         spin_unlock_irqrestore(&stats->lock, flags);
316 }
317 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
318
319 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
320  * and the sysfs version makes coldplug work too.
321  */
322
323 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
324                                                 const struct spi_device *sdev)
325 {
326         while (id->name[0]) {
327                 if (!strcmp(sdev->modalias, id->name))
328                         return id;
329                 id++;
330         }
331         return NULL;
332 }
333
334 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
335 {
336         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
337
338         return spi_match_id(sdrv->id_table, sdev);
339 }
340 EXPORT_SYMBOL_GPL(spi_get_device_id);
341
342 static int spi_match_device(struct device *dev, struct device_driver *drv)
343 {
344         const struct spi_device *spi = to_spi_device(dev);
345         const struct spi_driver *sdrv = to_spi_driver(drv);
346
347         /* Check override first, and if set, only use the named driver */
348         if (spi->driver_override)
349                 return strcmp(spi->driver_override, drv->name) == 0;
350
351         /* Attempt an OF style match */
352         if (of_driver_match_device(dev, drv))
353                 return 1;
354
355         /* Then try ACPI */
356         if (acpi_driver_match_device(dev, drv))
357                 return 1;
358
359         if (sdrv->id_table)
360                 return !!spi_match_id(sdrv->id_table, spi);
361
362         return strcmp(spi->modalias, drv->name) == 0;
363 }
364
365 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
366 {
367         const struct spi_device         *spi = to_spi_device(dev);
368         int rc;
369
370         rc = acpi_device_uevent_modalias(dev, env);
371         if (rc != -ENODEV)
372                 return rc;
373
374         return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
375 }
376
377 struct bus_type spi_bus_type = {
378         .name           = "spi",
379         .dev_groups     = spi_dev_groups,
380         .match          = spi_match_device,
381         .uevent         = spi_uevent,
382 };
383 EXPORT_SYMBOL_GPL(spi_bus_type);
384
385
386 static int spi_drv_probe(struct device *dev)
387 {
388         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
389         struct spi_device               *spi = to_spi_device(dev);
390         int ret;
391
392         ret = of_clk_set_defaults(dev->of_node, false);
393         if (ret)
394                 return ret;
395
396         if (dev->of_node) {
397                 spi->irq = of_irq_get(dev->of_node, 0);
398                 if (spi->irq == -EPROBE_DEFER)
399                         return -EPROBE_DEFER;
400                 if (spi->irq < 0)
401                         spi->irq = 0;
402         }
403
404         ret = dev_pm_domain_attach(dev, true);
405         if (ret)
406                 return ret;
407
408         ret = sdrv->probe(spi);
409         if (ret)
410                 dev_pm_domain_detach(dev, true);
411
412         return ret;
413 }
414
415 static int spi_drv_remove(struct device *dev)
416 {
417         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
418         int ret;
419
420         ret = sdrv->remove(to_spi_device(dev));
421         dev_pm_domain_detach(dev, true);
422
423         return ret;
424 }
425
426 static void spi_drv_shutdown(struct device *dev)
427 {
428         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
429
430         sdrv->shutdown(to_spi_device(dev));
431 }
432
433 /**
434  * __spi_register_driver - register a SPI driver
435  * @owner: owner module of the driver to register
436  * @sdrv: the driver to register
437  * Context: can sleep
438  *
439  * Return: zero on success, else a negative error code.
440  */
441 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
442 {
443         sdrv->driver.owner = owner;
444         sdrv->driver.bus = &spi_bus_type;
445         if (sdrv->probe)
446                 sdrv->driver.probe = spi_drv_probe;
447         if (sdrv->remove)
448                 sdrv->driver.remove = spi_drv_remove;
449         if (sdrv->shutdown)
450                 sdrv->driver.shutdown = spi_drv_shutdown;
451         return driver_register(&sdrv->driver);
452 }
453 EXPORT_SYMBOL_GPL(__spi_register_driver);
454
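/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * client driver for a hypothetical "acme-sensor" chip, registered via
 * module_spi_driver(), which ends up calling __spi_register_driver():
 *
 *	static int acme_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver acme_driver = {
 *		.driver = {
 *			.name	= "acme-sensor",
 *		},
 *		.probe	= acme_probe,
 *	};
 *	module_spi_driver(acme_driver);
 */
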
455 /*-------------------------------------------------------------------------*/
456
457 /* SPI devices should normally not be created by SPI device drivers; that
458  * would make them board-specific.  Similarly with SPI controller drivers.
459  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
460  * with other readonly (flashable) information about mainboard devices.
461  */
462
463 struct boardinfo {
464         struct list_head        list;
465         struct spi_board_info   board_info;
466 };
467
468 static LIST_HEAD(board_list);
469 static LIST_HEAD(spi_controller_list);
470
471 /*
472  * Used to protect add/del operations on the board_info list and the
473  * spi_controller list, and their matching process; also used to
474  * protect the spi_master_idr (struct idr) object.
475  */
476 static DEFINE_MUTEX(board_lock);
477
478 /**
479  * spi_alloc_device - Allocate a new SPI device
480  * @ctlr: Controller to which device is connected
481  * Context: can sleep
482  *
483  * Allows a driver to allocate and initialize a spi_device without
484  * registering it immediately.  This allows a driver to directly
485  * fill the spi_device with device parameters before calling
486  * spi_add_device() on it.
487  *
488  * Caller is responsible to call spi_add_device() on the returned
489  * spi_device structure to add it to the SPI controller.  If the caller
490  * needs to discard the spi_device without adding it, then it should
491  * call spi_dev_put() on it.
492  *
493  * Return: a pointer to the new device, or NULL.
494  */
495 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
496 {
497         struct spi_device       *spi;
498
499         if (!spi_controller_get(ctlr))
500                 return NULL;
501
502         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
503         if (!spi) {
504                 spi_controller_put(ctlr);
505                 return NULL;
506         }
507
508         spi->master = spi->controller = ctlr;
509         spi->dev.parent = &ctlr->dev;
510         spi->dev.bus = &spi_bus_type;
511         spi->dev.release = spidev_release;
512         spi->cs_gpio = -ENOENT;
513
514         spin_lock_init(&spi->statistics.lock);
515
516         device_initialize(&spi->dev);
517         return spi;
518 }
519 EXPORT_SYMBOL_GPL(spi_alloc_device);
520
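/*
 * A usage sketch for spi_alloc_device()/spi_add_device() (illustrative;
 * "ctlr" and the device parameters are hypothetical):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "acme-sensor", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 *
 * where spi_dev_put() discards the device that was never added.
 */
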
521 static void spi_dev_set_name(struct spi_device *spi)
522 {
523         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
524
525         if (adev) {
526                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
527                 return;
528         }
529
530         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
531                      spi->chip_select);
532 }
533
534 static int spi_dev_check(struct device *dev, void *data)
535 {
536         struct spi_device *spi = to_spi_device(dev);
537         struct spi_device *new_spi = data;
538
539         if (spi->controller == new_spi->controller &&
540             spi->chip_select == new_spi->chip_select)
541                 return -EBUSY;
542         return 0;
543 }
544
545 /**
546  * spi_add_device - Add spi_device allocated with spi_alloc_device
547  * @spi: spi_device to register
548  *
549  * Companion function to spi_alloc_device.  Devices allocated with
550  * spi_alloc_device can be added onto the spi bus with this function.
551  *
552  * Return: 0 on success; negative errno on failure
553  */
554 int spi_add_device(struct spi_device *spi)
555 {
556         static DEFINE_MUTEX(spi_add_lock);
557         struct spi_controller *ctlr = spi->controller;
558         struct device *dev = ctlr->dev.parent;
559         int status;
560
561         /* Chipselects are numbered 0..max; validate. */
562         if (spi->chip_select >= ctlr->num_chipselect) {
563                 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
564                         ctlr->num_chipselect);
565                 return -EINVAL;
566         }
567
568         /* Set the bus ID string */
569         spi_dev_set_name(spi);
570
571         /* We need to make sure there's no other device with this
572          * chipselect **BEFORE** we call setup(), else we'll trash
573          * its configuration.  Lock against concurrent add() calls.
574          */
575         mutex_lock(&spi_add_lock);
576
577         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
578         if (status) {
579                 dev_err(dev, "chipselect %d already in use\n",
580                                 spi->chip_select);
581                 goto done;
582         }
583
584         /* Descriptors take precedence */
585         if (ctlr->cs_gpiods)
586                 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
587         else if (ctlr->cs_gpios)
588                 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
589
590         /* Drivers may modify this initial i/o setup, but will
591          * normally rely on the device being set up.  Devices
592          * using SPI_CS_HIGH can't coexist well otherwise...
593          */
594         status = spi_setup(spi);
595         if (status < 0) {
596                 dev_err(dev, "can't setup %s, status %d\n",
597                                 dev_name(&spi->dev), status);
598                 goto done;
599         }
600
601         /* Device may be bound to an active driver when this returns */
602         status = device_add(&spi->dev);
603         if (status < 0)
604                 dev_err(dev, "can't add %s, status %d\n",
605                                 dev_name(&spi->dev), status);
606         else
607                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
608
609 done:
610         mutex_unlock(&spi_add_lock);
611         return status;
612 }
613 EXPORT_SYMBOL_GPL(spi_add_device);
614
615 /**
616  * spi_new_device - instantiate one new SPI device
617  * @ctlr: Controller to which device is connected
618  * @chip: Describes the SPI device
619  * Context: can sleep
620  *
621  * On typical mainboards, this is purely internal; and it's not needed
622  * after board init creates the hard-wired devices.  Some development
623  * platforms may not be able to use spi_register_board_info though, and
624  * this is exported so that for example a USB or parport based adapter
625  * driver could add devices (which it would learn about out-of-band).
626  *
627  * Return: the new device, or NULL.
628  */
629 struct spi_device *spi_new_device(struct spi_controller *ctlr,
630                                   struct spi_board_info *chip)
631 {
632         struct spi_device       *proxy;
633         int                     status;
634
635         /* NOTE:  caller did any chip->bus_num checks necessary.
636          *
637          * Also, unless we change the return value convention to use
638          * error-or-pointer (not NULL-or-pointer), troubleshootability
639          * suggests syslogged diagnostics are best here (ugh).
640          */
641
642         proxy = spi_alloc_device(ctlr);
643         if (!proxy)
644                 return NULL;
645
646         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
647
648         proxy->chip_select = chip->chip_select;
649         proxy->max_speed_hz = chip->max_speed_hz;
650         proxy->mode = chip->mode;
651         proxy->irq = chip->irq;
652         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
653         proxy->dev.platform_data = (void *) chip->platform_data;
654         proxy->controller_data = chip->controller_data;
655         proxy->controller_state = NULL;
656
657         if (chip->properties) {
658                 status = device_add_properties(&proxy->dev, chip->properties);
659                 if (status) {
660                         dev_err(&ctlr->dev,
661                                 "failed to add properties to '%s': %d\n",
662                                 chip->modalias, status);
663                         goto err_dev_put;
664                 }
665         }
666
667         status = spi_add_device(proxy);
668         if (status < 0)
669                 goto err_remove_props;
670
671         return proxy;
672
673 err_remove_props:
674         if (chip->properties)
675                 device_remove_properties(&proxy->dev);
676 err_dev_put:
677         spi_dev_put(proxy);
678         return NULL;
679 }
680 EXPORT_SYMBOL_GPL(spi_new_device);
681
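/*
 * A usage sketch for spi_new_device() (illustrative): an adapter driver
 * that learned about a chip out-of-band might do:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "acme-sensor",
 *		.max_speed_hz	= 10000000,
 *		.chip_select	= 2,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *
 * and must handle a NULL return, since no error code is available.
 */
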
682 /**
683  * spi_unregister_device - unregister a single SPI device
684  * @spi: spi_device to unregister
685  *
686  * Start making the passed SPI device vanish. Normally this would be handled
687  * by spi_unregister_controller().
688  */
689 void spi_unregister_device(struct spi_device *spi)
690 {
691         if (!spi)
692                 return;
693
694         if (spi->dev.of_node) {
695                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
696                 of_node_put(spi->dev.of_node);
697         }
698         if (ACPI_COMPANION(&spi->dev))
699                 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
700         device_unregister(&spi->dev);
701 }
702 EXPORT_SYMBOL_GPL(spi_unregister_device);
703
704 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
705                                               struct spi_board_info *bi)
706 {
707         struct spi_device *dev;
708
709         if (ctlr->bus_num != bi->bus_num)
710                 return;
711
712         dev = spi_new_device(ctlr, bi);
713         if (!dev)
714                 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
715                         bi->modalias);
716 }
717
718 /**
719  * spi_register_board_info - register SPI devices for a given board
720  * @info: array of chip descriptors
721  * @n: how many descriptors are provided
722  * Context: can sleep
723  *
724  * Board-specific early init code calls this (probably during arch_initcall)
725  * with segments of the SPI device table.  Any device nodes are created later,
726  * after the relevant parent SPI controller (bus_num) is defined.  We keep
727  * this table of devices forever, so that reloading a controller driver will
728  * not make Linux forget about these hard-wired devices.
729  *
730  * Other code can also call this, e.g. a particular add-on board might provide
731  * SPI devices through its expansion connector, so code initializing that board
732  * would naturally declare its SPI devices.
733  *
734  * The board info passed can safely be __initdata ... but be careful of
735  * any embedded pointers (platform_data, etc), they're copied as-is.
736  * Device properties are deep-copied though.
737  *
738  * Return: zero on success, else a negative error code.
739  */
740 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
741 {
742         struct boardinfo *bi;
743         int i;
744
745         if (!n)
746                 return 0;
747
748         bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
749         if (!bi)
750                 return -ENOMEM;
751
752         for (i = 0; i < n; i++, bi++, info++) {
753                 struct spi_controller *ctlr;
754
755                 memcpy(&bi->board_info, info, sizeof(*info));
756                 if (info->properties) {
757                         bi->board_info.properties =
758                                         property_entries_dup(info->properties);
759                         if (IS_ERR(bi->board_info.properties))
760                                 return PTR_ERR(bi->board_info.properties);
761                 }
762
763                 mutex_lock(&board_lock);
764                 list_add_tail(&bi->list, &board_list);
765                 list_for_each_entry(ctlr, &spi_controller_list, list)
766                         spi_match_controller_to_boardinfo(ctlr,
767                                                           &bi->board_info);
768                 mutex_unlock(&board_lock);
769         }
770
771         return 0;
772 }
773
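/*
 * A board-init usage sketch (illustrative; the modalias and the bus and
 * chipselect numbers are hypothetical):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "acme-flash",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init board_spi_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_spi_init);
 */
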
774 /*-------------------------------------------------------------------------*/
775
776 static void spi_set_cs(struct spi_device *spi, bool enable)
777 {
778         if (spi->mode & SPI_CS_HIGH)
779                 enable = !enable;
780
781         if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
782                 /*
783                  * Honour the SPI_NO_CS flag and invert the enable line, as
784                  * active low is default for SPI. Execution paths that handle
785                  * polarity inversion in gpiolib (such as device tree) will
786                  * enforce active high using the SPI_CS_HIGH flag, resulting in a
787                  * double inversion through the code above.
788                  */
789                 if (!(spi->mode & SPI_NO_CS)) {
790                         if (spi->cs_gpiod)
791                                 gpiod_set_value_cansleep(spi->cs_gpiod,
792                                                          !enable);
793                         else
794                                 gpio_set_value_cansleep(spi->cs_gpio, !enable);
795                 }
796                 /* Some SPI masters need both GPIO CS & slave_select */
797                 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
798                     spi->controller->set_cs)
799                         spi->controller->set_cs(spi, !enable);
800         } else if (spi->controller->set_cs) {
801                 spi->controller->set_cs(spi, !enable);
802         }
803 }
804
805 #ifdef CONFIG_HAS_DMA
806 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
807                 struct sg_table *sgt, void *buf, size_t len,
808                 enum dma_data_direction dir)
809 {
810         const bool vmalloced_buf = is_vmalloc_addr(buf);
811         unsigned int max_seg_size = dma_get_max_seg_size(dev);
812 #ifdef CONFIG_HIGHMEM
813         const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
814                                 (unsigned long)buf < (PKMAP_BASE +
815                                         (LAST_PKMAP * PAGE_SIZE)));
816 #else
817         const bool kmap_buf = false;
818 #endif
819         int desc_len;
820         int sgs;
821         struct page *vm_page;
822         struct scatterlist *sg;
823         void *sg_buf;
824         size_t min;
825         int i, ret;
826
827         if (vmalloced_buf || kmap_buf) {
828                 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
829                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
830         } else if (virt_addr_valid(buf)) {
831                 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
832                 sgs = DIV_ROUND_UP(len, desc_len);
833         } else {
834                 return -EINVAL;
835         }
836
837         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
838         if (ret != 0)
839                 return ret;
840
841         sg = &sgt->sgl[0];
842         for (i = 0; i < sgs; i++) {
843
844                 if (vmalloced_buf || kmap_buf) {
845                         /*
846                          * Next scatterlist entry size is the minimum between
847                          * the desc_len and the remaining buffer length that
848                          * fits in a page.
849                          */
850                         min = min_t(size_t, desc_len,
851                                     min_t(size_t, len,
852                                           PAGE_SIZE - offset_in_page(buf)));
853                         if (vmalloced_buf)
854                                 vm_page = vmalloc_to_page(buf);
855                         else
856                                 vm_page = kmap_to_page(buf);
857                         if (!vm_page) {
858                                 sg_free_table(sgt);
859                                 return -ENOMEM;
860                         }
861                         sg_set_page(sg, vm_page,
862                                     min, offset_in_page(buf));
863                 } else {
864                         min = min_t(size_t, len, desc_len);
865                         sg_buf = buf;
866                         sg_set_buf(sg, sg_buf, min);
867                 }
868
869                 buf += min;
870                 len -= min;
871                 sg = sg_next(sg);
872         }
873
874         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
875         if (!ret)
876                 ret = -ENOMEM;
877         if (ret < 0) {
878                 sg_free_table(sgt);
879                 return ret;
880         }
881
882         sgt->nents = ret;
883
884         return 0;
885 }
886
887 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
888                    struct sg_table *sgt, enum dma_data_direction dir)
889 {
890         if (sgt->orig_nents) {
891                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
892                 sg_free_table(sgt);
893         }
894 }
895
896 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
897 {
898         struct device *tx_dev, *rx_dev;
899         struct spi_transfer *xfer;
900         int ret;
901
902         if (!ctlr->can_dma)
903                 return 0;
904
905         if (ctlr->dma_tx)
906                 tx_dev = ctlr->dma_tx->device->dev;
907         else
908                 tx_dev = ctlr->dev.parent;
909
910         if (ctlr->dma_rx)
911                 rx_dev = ctlr->dma_rx->device->dev;
912         else
913                 rx_dev = ctlr->dev.parent;
914
915         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
916                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
917                         continue;
918
919                 if (xfer->tx_buf != NULL) {
920                         ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
921                                           (void *)xfer->tx_buf, xfer->len,
922                                           DMA_TO_DEVICE);
923                         if (ret != 0)
924                                 return ret;
925                 }
926
927                 if (xfer->rx_buf != NULL) {
928                         ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
929                                           xfer->rx_buf, xfer->len,
930                                           DMA_FROM_DEVICE);
931                         if (ret != 0) {
932                                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
933                                               DMA_TO_DEVICE);
934                                 return ret;
935                         }
936                 }
937         }
938
939         ctlr->cur_msg_mapped = true;
940
941         return 0;
942 }
943
944 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
945 {
946         struct spi_transfer *xfer;
947         struct device *tx_dev, *rx_dev;
948
949         if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
950                 return 0;
951
952         if (ctlr->dma_tx)
953                 tx_dev = ctlr->dma_tx->device->dev;
954         else
955                 tx_dev = ctlr->dev.parent;
956
957         if (ctlr->dma_rx)
958                 rx_dev = ctlr->dma_rx->device->dev;
959         else
960                 rx_dev = ctlr->dev.parent;
961
962         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
963                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
964                         continue;
965
966                 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
967                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
968         }
969
970         return 0;
971 }
972 #else /* !CONFIG_HAS_DMA */
973 static inline int __spi_map_msg(struct spi_controller *ctlr,
974                                 struct spi_message *msg)
975 {
976         return 0;
977 }
978
979 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
980                                   struct spi_message *msg)
981 {
982         return 0;
983 }
984 #endif /* !CONFIG_HAS_DMA */
985
986 static inline int spi_unmap_msg(struct spi_controller *ctlr,
987                                 struct spi_message *msg)
988 {
989         struct spi_transfer *xfer;
990
991         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
992                 /*
993                  * Restore the original (NULL) value of tx_buf or rx_buf if
994                  * spi_map_msg() pointed them at a dummy buffer.
995                  */
996                 if (xfer->tx_buf == ctlr->dummy_tx)
997                         xfer->tx_buf = NULL;
998                 if (xfer->rx_buf == ctlr->dummy_rx)
999                         xfer->rx_buf = NULL;
1000         }
1001
1002         return __spi_unmap_msg(ctlr, msg);
1003 }
1004
1005 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1006 {
1007         struct spi_transfer *xfer;
1008         void *tmp;
1009         unsigned int max_tx, max_rx;
1010
1011         if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
1012                 max_tx = 0;
1013                 max_rx = 0;
1014
1015                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1016                         if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1017                             !xfer->tx_buf)
1018                                 max_tx = max(xfer->len, max_tx);
1019                         if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1020                             !xfer->rx_buf)
1021                                 max_rx = max(xfer->len, max_rx);
1022                 }
1023
1024                 if (max_tx) {
1025                         tmp = krealloc(ctlr->dummy_tx, max_tx,
1026                                        GFP_KERNEL | GFP_DMA);
1027                         if (!tmp)
1028                                 return -ENOMEM;
1029                         ctlr->dummy_tx = tmp;
1030                         memset(tmp, 0, max_tx);
1031                 }
1032
1033                 if (max_rx) {
1034                         tmp = krealloc(ctlr->dummy_rx, max_rx,
1035                                        GFP_KERNEL | GFP_DMA);
1036                         if (!tmp)
1037                                 return -ENOMEM;
1038                         ctlr->dummy_rx = tmp;
1039                 }
1040
1041                 if (max_tx || max_rx) {
1042                         list_for_each_entry(xfer, &msg->transfers,
1043                                             transfer_list) {
1044                                 if (!xfer->len)
1045                                         continue;
1046                                 if (!xfer->tx_buf)
1047                                         xfer->tx_buf = ctlr->dummy_tx;
1048                                 if (!xfer->rx_buf)
1049                                         xfer->rx_buf = ctlr->dummy_rx;
1050                         }
1051                 }
1052         }
1053
1054         return __spi_map_msg(ctlr, msg);
1055 }
1056
1057 static int spi_transfer_wait(struct spi_controller *ctlr,
1058                              struct spi_message *msg,
1059                              struct spi_transfer *xfer)
1060 {
1061         struct spi_statistics *statm = &ctlr->statistics;
1062         struct spi_statistics *stats = &msg->spi->statistics;
1063         unsigned long long ms = 1;
1064
1065         if (spi_controller_is_slave(ctlr)) {
1066                 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1067                         dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1068                         return -EINTR;
1069                 }
1070         } else {
1071                 ms = 8LL * 1000LL * xfer->len;
1072                 do_div(ms, xfer->speed_hz);
1073                 ms += ms + 200; /* some tolerance */
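                /*
                 * Worked example (illustrative): a 4096-byte transfer at
                 * 1 MHz gives 8 * 1000 * 4096 / 1000000 = 32 ms on the
                 * wire, so the timeout computed above is
                 * 2 * 32 + 200 = 264 ms.
                 */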
1074
1075                 if (ms > UINT_MAX)
1076                         ms = UINT_MAX;
1077
1078                 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1079                                                  msecs_to_jiffies(ms));
1080
1081                 if (ms == 0) {
1082                         SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1083                         SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1084                         dev_err(&msg->spi->dev,
1085                                 "SPI transfer timed out\n");
1086                         return -ETIMEDOUT;
1087                 }
1088         }
1089
1090         return 0;
1091 }
1092
1093 /*
1094  * spi_transfer_one_message - Default implementation of transfer_one_message()
1095  *
1096  * This is a standard implementation of transfer_one_message() for
1097  * drivers which implement a transfer_one() operation.  It provides
1098  * standard handling of delays and chip select management.
1099  */
1100 static int spi_transfer_one_message(struct spi_controller *ctlr,
1101                                     struct spi_message *msg)
1102 {
1103         struct spi_transfer *xfer;
1104         bool keep_cs = false;
1105         int ret = 0;
1106         struct spi_statistics *statm = &ctlr->statistics;
1107         struct spi_statistics *stats = &msg->spi->statistics;
1108
1109         spi_set_cs(msg->spi, true);
1110
1111         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1112         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1113
1114         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1115                 trace_spi_transfer_start(msg, xfer);
1116
1117                 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1118                 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1119
1120                 if (xfer->tx_buf || xfer->rx_buf) {
1121                         reinit_completion(&ctlr->xfer_completion);
1122
1123                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1124                         if (ret < 0) {
1125                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1126                                                                errors);
1127                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1128                                                                errors);
1129                                 dev_err(&msg->spi->dev,
1130                                         "SPI transfer failed: %d\n", ret);
1131                                 goto out;
1132                         }
1133
1134                         if (ret > 0) {
1135                                 ret = spi_transfer_wait(ctlr, msg, xfer);
1136                                 if (ret < 0)
1137                                         msg->status = ret;
1138                         }
1139                 } else {
1140                         if (xfer->len)
1141                                 dev_err(&msg->spi->dev,
1142                                         "Bufferless transfer has length %u\n",
1143                                         xfer->len);
1144                 }
1145
1146                 trace_spi_transfer_stop(msg, xfer);
1147
1148                 if (msg->status != -EINPROGRESS)
1149                         goto out;
1150
1151                 if (xfer->delay_usecs) {
1152                         u16 us = xfer->delay_usecs;
1153
1154                         if (us <= 10)
1155                                 udelay(us);
1156                         else
1157                                 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1158                 }
1159
1160                 if (xfer->cs_change) {
1161                         if (list_is_last(&xfer->transfer_list,
1162                                          &msg->transfers)) {
1163                                 keep_cs = true;
1164                         } else {
1165                                 spi_set_cs(msg->spi, false);
1166                                 udelay(10);
1167                                 spi_set_cs(msg->spi, true);
1168                         }
1169                 }
1170
1171                 msg->actual_length += xfer->len;
1172         }
1173
1174 out:
1175         if (ret != 0 || !keep_cs)
1176                 spi_set_cs(msg->spi, false);
1177
1178         if (msg->status == -EINPROGRESS)
1179                 msg->status = ret;
1180
1181         if (msg->status && ctlr->handle_err)
1182                 ctlr->handle_err(ctlr, msg);
1183
1184         spi_res_release(ctlr, msg);
1185
1186         spi_finalize_current_message(ctlr);
1187
1188         return ret;
1189 }
1190
1191 /**
1192  * spi_finalize_current_transfer - report completion of a transfer
1193  * @ctlr: the controller reporting completion
1194  *
1195  * Called by SPI drivers using the core transfer_one_message()
1196  * implementation to notify it that the current interrupt driven
1197  * transfer has finished and the next one may be scheduled.
1198  */
1199 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1200 {
1201         complete(&ctlr->xfer_completion);
1202 }
1203 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1204
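/*
 * A controller-driver sketch tying transfer_one() to this function
 * (illustrative; acme_start_dma() and the IRQ plumbing behind it are
 * hypothetical). transfer_one() returns a positive value to tell the
 * core to wait, and the interrupt handler completes that wait:
 *
 *	static int acme_transfer_one(struct spi_controller *ctlr,
 *				     struct spi_device *spi,
 *				     struct spi_transfer *xfer)
 *	{
 *		acme_start_dma(ctlr, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t acme_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */
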
1205 /**
1206  * __spi_pump_messages - function which processes spi message queue
1207  * @ctlr: controller to process queue for
1208  * @in_kthread: true if we are in the context of the message pump thread
1209  *
1210  * This function checks if there is any spi message in the queue that
1211  * needs processing and, if so, calls out to the driver to initialize hardware
1212  * and transfer each message.
1213  *
1214  * Note that it is called both from the kthread itself and also from
1215  * inside spi_sync(); the queue extraction handling at the top of the
1216  * function should deal with this safely.
1217  */
1218 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1219 {
1220         unsigned long flags;
1221         bool was_busy = false;
1222         int ret;
1223
1224         /* Lock queue */
1225         spin_lock_irqsave(&ctlr->queue_lock, flags);
1226
1227         /* Make sure we are not already running a message */
1228         if (ctlr->cur_msg) {
1229                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1230                 return;
1231         }
1232
1233         /* If another context is idling the device then defer */
1234         if (ctlr->idling) {
1235                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1236                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1237                 return;
1238         }
1239
1240         /* Check if the queue is idle */
1241         if (list_empty(&ctlr->queue) || !ctlr->running) {
1242                 if (!ctlr->busy) {
1243                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1244                         return;
1245                 }
1246
1247                 /* Only do teardown in the thread */
1248                 if (!in_kthread) {
1249                         kthread_queue_work(&ctlr->kworker,
1250                                            &ctlr->pump_messages);
1251                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1252                         return;
1253                 }
1254
1255                 ctlr->busy = false;
1256                 ctlr->idling = true;
1257                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1258
1259                 kfree(ctlr->dummy_rx);
1260                 ctlr->dummy_rx = NULL;
1261                 kfree(ctlr->dummy_tx);
1262                 ctlr->dummy_tx = NULL;
1263                 if (ctlr->unprepare_transfer_hardware &&
1264                     ctlr->unprepare_transfer_hardware(ctlr))
1265                         dev_err(&ctlr->dev,
1266                                 "failed to unprepare transfer hardware\n");
1267                 if (ctlr->auto_runtime_pm) {
1268                         pm_runtime_mark_last_busy(ctlr->dev.parent);
1269                         pm_runtime_put_autosuspend(ctlr->dev.parent);
1270                 }
1271                 trace_spi_controller_idle(ctlr);
1272
1273                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1274                 ctlr->idling = false;
1275                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1276                 return;
1277         }
1278
1279         /* Extract head of queue */
1280         ctlr->cur_msg =
1281                 list_first_entry(&ctlr->queue, struct spi_message, queue);
1282
1283         list_del_init(&ctlr->cur_msg->queue);
1284         if (ctlr->busy)
1285                 was_busy = true;
1286         else
1287                 ctlr->busy = true;
1288         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1289
1290         mutex_lock(&ctlr->io_mutex);
1291
1292         if (!was_busy && ctlr->auto_runtime_pm) {
1293                 ret = pm_runtime_get_sync(ctlr->dev.parent);
1294                 if (ret < 0) {
1295                         pm_runtime_put_noidle(ctlr->dev.parent);
1296                         dev_err(&ctlr->dev, "Failed to power device: %d\n",
1297                                 ret);
1298                         mutex_unlock(&ctlr->io_mutex);
1299                         return;
1300                 }
1301         }
1302
1303         if (!was_busy)
1304                 trace_spi_controller_busy(ctlr);
1305
1306         if (!was_busy && ctlr->prepare_transfer_hardware) {
1307                 ret = ctlr->prepare_transfer_hardware(ctlr);
1308                 if (ret) {
1309                         dev_err(&ctlr->dev,
1310                                 "failed to prepare transfer hardware: %d\n",
1311                                 ret);
1312
1313                         if (ctlr->auto_runtime_pm)
1314                                 pm_runtime_put(ctlr->dev.parent);
1315
1316                         ctlr->cur_msg->status = ret;
1317                         spi_finalize_current_message(ctlr);
1318
1319                         mutex_unlock(&ctlr->io_mutex);
1320                         return;
1321                 }
1322         }
1323
1324         trace_spi_message_start(ctlr->cur_msg);
1325
1326         if (ctlr->prepare_message) {
1327                 ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1328                 if (ret) {
1329                         dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1330                                 ret);
1331                         ctlr->cur_msg->status = ret;
1332                         spi_finalize_current_message(ctlr);
1333                         goto out;
1334                 }
1335                 ctlr->cur_msg_prepared = true;
1336         }
1337
1338         ret = spi_map_msg(ctlr, ctlr->cur_msg);
1339         if (ret) {
1340                 ctlr->cur_msg->status = ret;
1341                 spi_finalize_current_message(ctlr);
1342                 goto out;
1343         }
1344
1345         ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1346         if (ret) {
1347                 dev_err(&ctlr->dev,
1348                         "failed to transfer one message from queue\n");
1349                 goto out;
1350         }
1351
1352 out:
1353         mutex_unlock(&ctlr->io_mutex);
1354
1355         /* Prod the scheduler in case transfer_one() was busy waiting */
1356         if (!ret)
1357                 cond_resched();
1358 }
1359
1360 /**
1361  * spi_pump_messages - kthread work function which processes spi message queue
1362  * @work: pointer to kthread work struct contained in the controller struct
1363  */
1364 static void spi_pump_messages(struct kthread_work *work)
1365 {
1366         struct spi_controller *ctlr =
1367                 container_of(work, struct spi_controller, pump_messages);
1368
1369         __spi_pump_messages(ctlr, true);
1370 }
1371
1372 static int spi_init_queue(struct spi_controller *ctlr)
1373 {
1374         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1375
1376         ctlr->running = false;
1377         ctlr->busy = false;
1378
1379         kthread_init_worker(&ctlr->kworker);
1380         ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1381                                          "%s", dev_name(&ctlr->dev));
1382         if (IS_ERR(ctlr->kworker_task)) {
1383                 dev_err(&ctlr->dev, "failed to create message pump task\n");
1384                 return PTR_ERR(ctlr->kworker_task);
1385         }
1386         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1387
1388         /*
1389          * Controller config will indicate if this controller should run the
1390          * message pump with high (realtime) priority to reduce the transfer
1391          * latency on the bus by minimising the delay between a transfer
1392          * request and the scheduling of the message pump thread. Without this
1393          * setting the message pump thread will remain at default priority.
1394          */
1395         if (ctlr->rt) {
1396                 dev_info(&ctlr->dev,
1397                         "will run message pump with realtime priority\n");
1398                 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1399         }
1400
1401         return 0;
1402 }
1403
1404 /**
1405  * spi_get_next_queued_message() - called by driver to check for queued
1406  * messages
1407  * @ctlr: the controller to check for queued messages
1408  *
1409  * If there are more messages in the queue, the next message is returned from
1410  * this call.
1411  *
1412  * Return: the next message in the queue, else NULL if the queue is empty.
1413  */
1414 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1415 {
1416         struct spi_message *next;
1417         unsigned long flags;
1418
1419         /* get a pointer to the next message, if any */
1420         spin_lock_irqsave(&ctlr->queue_lock, flags);
1421         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1422                                         queue);
1423         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1424
1425         return next;
1426 }
1427 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1428
1429 /**
1430  * spi_finalize_current_message() - the current message is complete
1431  * @ctlr: the controller to return the message to
1432  *
1433  * Called by the driver to notify the core that the message in the front of the
1434  * queue is complete and can be removed from the queue.
1435  */
1436 void spi_finalize_current_message(struct spi_controller *ctlr)
1437 {
1438         struct spi_message *mesg;
1439         unsigned long flags;
1440         int ret;
1441
1442         spin_lock_irqsave(&ctlr->queue_lock, flags);
1443         mesg = ctlr->cur_msg;
1444         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1445
1446         spi_unmap_msg(ctlr, mesg);
1447
1448         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1449                 ret = ctlr->unprepare_message(ctlr, mesg);
1450                 if (ret) {
1451                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1452                                 ret);
1453                 }
1454         }
1455
1456         spin_lock_irqsave(&ctlr->queue_lock, flags);
1457         ctlr->cur_msg = NULL;
1458         ctlr->cur_msg_prepared = false;
1459         kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1460         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1461
1462         trace_spi_message_done(mesg);
1463
1464         mesg->state = NULL;
1465         if (mesg->complete)
1466                 mesg->complete(mesg->context);
1467 }
1468 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
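/*
 * An illustrative sketch, not part of the original file: drivers that
 * implement transfer_one_message() usually set the message status and call
 * spi_finalize_current_message() once the hardware signals completion;
 * struct my_hw and my_hw_collect_status() are hypothetical.
 *
 *	static irqreturn_t my_hw_irq_thread(int irq, void *dev_id)
 *	{
 *		struct my_hw *hw = dev_id;
 *		struct spi_message *msg = hw->ctlr->cur_msg;
 *
 *		msg->status = my_hw_collect_status(hw);
 *		spi_finalize_current_message(hw->ctlr);
 *		return IRQ_HANDLED;
 *	}
 */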
1469
1470 static int spi_start_queue(struct spi_controller *ctlr)
1471 {
1472         unsigned long flags;
1473
1474         spin_lock_irqsave(&ctlr->queue_lock, flags);
1475
1476         if (ctlr->running || ctlr->busy) {
1477                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1478                 return -EBUSY;
1479         }
1480
1481         ctlr->running = true;
1482         ctlr->cur_msg = NULL;
1483         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1484
1485         kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1486
1487         return 0;
1488 }
1489
1490 static int spi_stop_queue(struct spi_controller *ctlr)
1491 {
1492         unsigned long flags;
1493         unsigned limit = 500;
1494         int ret = 0;
1495
1496         spin_lock_irqsave(&ctlr->queue_lock, flags);
1497
1498         /*
1499          * This is a bit lame, but is optimized for the common execution path.
1500          * A wait_queue on the ctlr->busy could be used, but then the common
1501          * execution path (pump_messages) would be required to call wake_up or
1502          * friends on every SPI message. Do this instead.
1503          */
1504         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1505                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1506                 usleep_range(10000, 11000);
1507                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1508         }
1509
1510         if (!list_empty(&ctlr->queue) || ctlr->busy)
1511                 ret = -EBUSY;
1512         else
1513                 ctlr->running = false;
1514
1515         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1516
1517         if (ret) {
1518                 dev_warn(&ctlr->dev, "could not stop message queue\n");
1519                 return ret;
1520         }
1521         return ret;
1522 }
1523
1524 static int spi_destroy_queue(struct spi_controller *ctlr)
1525 {
1526         int ret;
1527
1528         ret = spi_stop_queue(ctlr);
1529
1530         /*
1531          * kthread_flush_worker will block until all work is done.
1532          * If the reason that stop_queue timed out is that the work will never
1533          * finish, then calling flush/stop on the thread does no good, so
1534          * just return.
1535          */
1536         if (ret) {
1537                 dev_err(&ctlr->dev, "problem destroying queue\n");
1538                 return ret;
1539         }
1540
1541         kthread_flush_worker(&ctlr->kworker);
1542         kthread_stop(ctlr->kworker_task);
1543
1544         return 0;
1545 }
1546
1547 static int __spi_queued_transfer(struct spi_device *spi,
1548                                  struct spi_message *msg,
1549                                  bool need_pump)
1550 {
1551         struct spi_controller *ctlr = spi->controller;
1552         unsigned long flags;
1553
1554         spin_lock_irqsave(&ctlr->queue_lock, flags);
1555
1556         if (!ctlr->running) {
1557                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1558                 return -ESHUTDOWN;
1559         }
1560         msg->actual_length = 0;
1561         msg->status = -EINPROGRESS;
1562
1563         list_add_tail(&msg->queue, &ctlr->queue);
1564         if (!ctlr->busy && need_pump)
1565                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1566
1567         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1568         return 0;
1569 }
1570
1571 /**
1572  * spi_queued_transfer - transfer function for queued transfers
1573  * @spi: spi device which is requesting transfer
1574  * @msg: the spi message to be handled and queued onto the driver queue
1575  *
1576  * Return: zero on success, else a negative error code.
1577  */
1578 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1579 {
1580         return __spi_queued_transfer(spi, msg, true);
1581 }
1582
1583 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1584 {
1585         int ret;
1586
1587         ctlr->transfer = spi_queued_transfer;
1588         if (!ctlr->transfer_one_message)
1589                 ctlr->transfer_one_message = spi_transfer_one_message;
1590
1591         /* Initialize and start queue */
1592         ret = spi_init_queue(ctlr);
1593         if (ret) {
1594                 dev_err(&ctlr->dev, "problem initializing queue\n");
1595                 goto err_init_queue;
1596         }
1597         ctlr->queued = true;
1598         ret = spi_start_queue(ctlr);
1599         if (ret) {
1600                 dev_err(&ctlr->dev, "problem starting queue\n");
1601                 goto err_start_queue;
1602         }
1603
1604         return 0;
1605
1606 err_start_queue:
1607         spi_destroy_queue(ctlr);
1608 err_init_queue:
1609         return ret;
1610 }
1611
1612 /**
1613  * spi_flush_queue - Send all pending messages in the queue from the caller's
1614  *                   context
1615  * @ctlr: controller to process queue for
1616  *
1617  * This should be used when one wants to ensure all pending messages have been
1618  * sent before doing something. It is used by the spi-mem code to make sure SPI
1619  * memory operations do not preempt regular SPI transfers that have been queued
1620  * before the spi-mem operation.
1621  */
1622 void spi_flush_queue(struct spi_controller *ctlr)
1623 {
1624         if (ctlr->transfer == spi_queued_transfer)
1625                 __spi_pump_messages(ctlr, false);
1626 }
1627
1628 /*-------------------------------------------------------------------------*/
1629
1630 #if defined(CONFIG_OF)
1631 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1632                            struct device_node *nc)
1633 {
1634         u32 value;
1635         int rc;
1636
1637         /* Mode (clock phase/polarity/etc.) */
1638         if (of_property_read_bool(nc, "spi-cpha"))
1639                 spi->mode |= SPI_CPHA;
1640         if (of_property_read_bool(nc, "spi-cpol"))
1641                 spi->mode |= SPI_CPOL;
1642         if (of_property_read_bool(nc, "spi-3wire"))
1643                 spi->mode |= SPI_3WIRE;
1644         if (of_property_read_bool(nc, "spi-lsb-first"))
1645                 spi->mode |= SPI_LSB_FIRST;
1646
1647         /*
1648          * For descriptors associated with the device, polarity inversion is
1649          * handled in the gpiolib, so all chip selects are "active high" in
1650          * the logical sense, the gpiolib will invert the line if need be.
1651          */
1652         if (ctlr->use_gpio_descriptors)
1653                 spi->mode |= SPI_CS_HIGH;
1654         else if (of_property_read_bool(nc, "spi-cs-high"))
1655                 spi->mode |= SPI_CS_HIGH;
1656
1657         /* Device DUAL/QUAD mode */
1658         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1659                 switch (value) {
1660                 case 1:
1661                         break;
1662                 case 2:
1663                         spi->mode |= SPI_TX_DUAL;
1664                         break;
1665                 case 4:
1666                         spi->mode |= SPI_TX_QUAD;
1667                         break;
1668                 case 8:
1669                         spi->mode |= SPI_TX_OCTAL;
1670                         break;
1671                 default:
1672                         dev_warn(&ctlr->dev,
1673                                 "spi-tx-bus-width %d not supported\n",
1674                                 value);
1675                         break;
1676                 }
1677         }
1678
1679         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1680                 switch (value) {
1681                 case 1:
1682                         break;
1683                 case 2:
1684                         spi->mode |= SPI_RX_DUAL;
1685                         break;
1686                 case 4:
1687                         spi->mode |= SPI_RX_QUAD;
1688                         break;
1689                 case 8:
1690                         spi->mode |= SPI_RX_OCTAL;
1691                         break;
1692                 default:
1693                         dev_warn(&ctlr->dev,
1694                                 "spi-rx-bus-width %d not supported\n",
1695                                 value);
1696                         break;
1697                 }
1698         }
1699
1700         if (spi_controller_is_slave(ctlr)) {
1701                 if (!of_node_name_eq(nc, "slave")) {
1702                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1703                                 nc);
1704                         return -EINVAL;
1705                 }
1706                 return 0;
1707         }
1708
1709         /* Device address */
1710         rc = of_property_read_u32(nc, "reg", &value);
1711         if (rc) {
1712                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1713                         nc, rc);
1714                 return rc;
1715         }
1716         spi->chip_select = value;
1717
1718         /* Device speed */
1719         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1720         if (rc) {
1721                 dev_err(&ctlr->dev,
1722                         "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1723                 return rc;
1724         }
1725         spi->max_speed_hz = value;
1726
1727         return 0;
1728 }
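/*
 * For reference, an illustrative device tree node (not taken from a real
 * board) that this parser accepts:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;				(chip select 0)
 *		spi-max-frequency = <20000000>;
 *		spi-cpol;
 *		spi-cpha;
 *		spi-rx-bus-width = <4>;			(sets SPI_RX_QUAD)
 *	};
 */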
1729
1730 static struct spi_device *
1731 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1732 {
1733         struct spi_device *spi;
1734         int rc;
1735
1736         /* Alloc an spi_device */
1737         spi = spi_alloc_device(ctlr);
1738         if (!spi) {
1739                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1740                 rc = -ENOMEM;
1741                 goto err_out;
1742         }
1743
1744         /* Select device driver */
1745         rc = of_modalias_node(nc, spi->modalias,
1746                                 sizeof(spi->modalias));
1747         if (rc < 0) {
1748                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1749                 goto err_out;
1750         }
1751
1752         rc = of_spi_parse_dt(ctlr, spi, nc);
1753         if (rc)
1754                 goto err_out;
1755
1756         /* Store a pointer to the node in the device structure */
1757         of_node_get(nc);
1758         spi->dev.of_node = nc;
1759
1760         /* Register the new device */
1761         rc = spi_add_device(spi);
1762         if (rc) {
1763                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1764                 goto err_of_node_put;
1765         }
1766
1767         return spi;
1768
1769 err_of_node_put:
1770         of_node_put(nc);
1771 err_out:
1772         spi_dev_put(spi);
1773         return ERR_PTR(rc);
1774 }
1775
1776 /**
1777  * of_register_spi_devices() - Register child devices onto the SPI bus
1778  * @ctlr:       Pointer to spi_controller device
1779  *
1780  * Registers an spi_device for each child node of the controller node which
1781  * represents a valid SPI slave.
1782  */
1783 static void of_register_spi_devices(struct spi_controller *ctlr)
1784 {
1785         struct spi_device *spi;
1786         struct device_node *nc;
1787
1788         if (!ctlr->dev.of_node)
1789                 return;
1790
1791         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1792                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1793                         continue;
1794                 spi = of_register_spi_device(ctlr, nc);
1795                 if (IS_ERR(spi)) {
1796                         dev_warn(&ctlr->dev,
1797                                  "Failed to create SPI device for %pOF\n", nc);
1798                         of_node_clear_flag(nc, OF_POPULATED);
1799                 }
1800         }
1801 }
1802 #else
1803 static void of_register_spi_devices(struct spi_controller *ctlr) { }
1804 #endif
1805
1806 #ifdef CONFIG_ACPI
1807 static void acpi_spi_parse_apple_properties(struct spi_device *spi)
1808 {
1809         struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
1810         const union acpi_object *obj;
1811
1812         if (!x86_apple_machine)
1813                 return;
1814
1815         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1816             && obj->buffer.length >= 4)
1817                 spi->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1818
1819         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1820             && obj->buffer.length == 8)
1821                 spi->bits_per_word = *(u64 *)obj->buffer.pointer;
1822
1823         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1824             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1825                 spi->mode |= SPI_LSB_FIRST;
1826
1827         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1828             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1829                 spi->mode |= SPI_CPOL;
1830
1831         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1832             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1833                 spi->mode |= SPI_CPHA;
1834 }
1835
1836 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1837 {
1838         struct spi_device *spi = data;
1839         struct spi_controller *ctlr = spi->controller;
1840
1841         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1842                 struct acpi_resource_spi_serialbus *sb;
1843
1844                 sb = &ares->data.spi_serial_bus;
1845                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1846                         /*
1847                          * ACPI DeviceSelection numbering is handled by the
1848                          * host controller driver in Windows and can vary
1849                          * from driver to driver. In Linux we always expect
1850                          * 0 .. max - 1 so we need to ask the driver to
1851                          * translate between the two schemes.
1852                          */
1853                         if (ctlr->fw_translate_cs) {
1854                                 int cs = ctlr->fw_translate_cs(ctlr,
1855                                                 sb->device_selection);
1856                                 if (cs < 0)
1857                                         return cs;
1858                                 spi->chip_select = cs;
1859                         } else {
1860                                 spi->chip_select = sb->device_selection;
1861                         }
1862
1863                         spi->max_speed_hz = sb->connection_speed;
1864
1865                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1866                                 spi->mode |= SPI_CPHA;
1867                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1868                                 spi->mode |= SPI_CPOL;
1869                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1870                                 spi->mode |= SPI_CS_HIGH;
1871                 }
1872         } else if (spi->irq < 0) {
1873                 struct resource r;
1874
1875                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1876                         spi->irq = r.start;
1877         }
1878
1879         /* Always tell the ACPI core to skip this resource */
1880         return 1;
1881 }
1882
1883 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1884                                             struct acpi_device *adev)
1885 {
1886         struct list_head resource_list;
1887         struct spi_device *spi;
1888         int ret;
1889
1890         if (acpi_bus_get_status(adev) || !adev->status.present ||
1891             acpi_device_enumerated(adev))
1892                 return AE_OK;
1893
1894         spi = spi_alloc_device(ctlr);
1895         if (!spi) {
1896                 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1897                         dev_name(&adev->dev));
1898                 return AE_NO_MEMORY;
1899         }
1900
1901         ACPI_COMPANION_SET(&spi->dev, adev);
1902         spi->irq = -1;
1903
1904         INIT_LIST_HEAD(&resource_list);
1905         ret = acpi_dev_get_resources(adev, &resource_list,
1906                                      acpi_spi_add_resource, spi);
1907         acpi_dev_free_resource_list(&resource_list);
1908
1909         acpi_spi_parse_apple_properties(spi);
1910
1911         if (ret < 0 || !spi->max_speed_hz) {
1912                 spi_dev_put(spi);
1913                 return AE_OK;
1914         }
1915
1916         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
1917                           sizeof(spi->modalias));
1918
1919         if (spi->irq < 0)
1920                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1921
1922         acpi_device_set_enumerated(adev);
1923
1924         adev->power.flags.ignore_parent = true;
1925         if (spi_add_device(spi)) {
1926                 adev->power.flags.ignore_parent = false;
1927                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1928                         dev_name(&adev->dev));
1929                 spi_dev_put(spi);
1930         }
1931
1932         return AE_OK;
1933 }
1934
1935 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1936                                        void *data, void **return_value)
1937 {
1938         struct spi_controller *ctlr = data;
1939         struct acpi_device *adev;
1940
1941         if (acpi_bus_get_device(handle, &adev))
1942                 return AE_OK;
1943
1944         return acpi_register_spi_device(ctlr, adev);
1945 }
1946
1947 static void acpi_register_spi_devices(struct spi_controller *ctlr)
1948 {
1949         acpi_status status;
1950         acpi_handle handle;
1951
1952         handle = ACPI_HANDLE(ctlr->dev.parent);
1953         if (!handle)
1954                 return;
1955
1956         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1957                                      acpi_spi_add_device, NULL, ctlr, NULL);
1958         if (ACPI_FAILURE(status))
1959                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1960 }
1961 #else
1962 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1963 #endif /* CONFIG_ACPI */
1964
1965 static void spi_controller_release(struct device *dev)
1966 {
1967         struct spi_controller *ctlr;
1968
1969         ctlr = container_of(dev, struct spi_controller, dev);
1970         kfree(ctlr);
1971 }
1972
1973 static struct class spi_master_class = {
1974         .name           = "spi_master",
1975         .owner          = THIS_MODULE,
1976         .dev_release    = spi_controller_release,
1977         .dev_groups     = spi_master_groups,
1978 };
1979
1980 #ifdef CONFIG_SPI_SLAVE
1981 /**
1982  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1983  *                   controller
1984  * @spi: device used for the current transfer
1985  */
1986 int spi_slave_abort(struct spi_device *spi)
1987 {
1988         struct spi_controller *ctlr = spi->controller;
1989
1990         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1991                 return ctlr->slave_abort(ctlr);
1992
1993         return -ENOTSUPP;
1994 }
1995 EXPORT_SYMBOL_GPL(spi_slave_abort);
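/*
 * An illustrative sketch, not part of the original file: a slave protocol
 * driver could abort a submitted message when its own timeout fires;
 * struct my_slave_priv and the timer wiring are hypothetical.
 *
 *	static void my_slave_timeout(struct timer_list *t)
 *	{
 *		struct my_slave_priv *priv = from_timer(priv, t, timer);
 *
 *		spi_slave_abort(priv->spi);
 *	}
 */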
1996
1997 static int match_true(struct device *dev, void *data)
1998 {
1999         return 1;
2000 }
2001
2002 static ssize_t spi_slave_show(struct device *dev,
2003                               struct device_attribute *attr, char *buf)
2004 {
2005         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2006                                                    dev);
2007         struct device *child;
2008
2009         child = device_find_child(&ctlr->dev, NULL, match_true);
2010         return sprintf(buf, "%s\n",
2011                        child ? to_spi_device(child)->modalias : NULL);
2012 }
2013
2014 static ssize_t spi_slave_store(struct device *dev,
2015                                struct device_attribute *attr, const char *buf,
2016                                size_t count)
2017 {
2018         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2019                                                    dev);
2020         struct spi_device *spi;
2021         struct device *child;
2022         char name[32];
2023         int rc;
2024
2025         rc = sscanf(buf, "%31s", name);
2026         if (rc != 1 || !name[0])
2027                 return -EINVAL;
2028
2029         child = device_find_child(&ctlr->dev, NULL, match_true);
2030         if (child) {
2031                 /* Remove registered slave */
2032                 device_unregister(child);
2033                 put_device(child);
2034         }
2035
2036         if (strcmp(name, "(null)")) {
2037                 /* Register new slave */
2038                 spi = spi_alloc_device(ctlr);
2039                 if (!spi)
2040                         return -ENOMEM;
2041
2042                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2043
2044                 rc = spi_add_device(spi);
2045                 if (rc) {
2046                         spi_dev_put(spi);
2047                         return rc;
2048                 }
2049         }
2050
2051         return count;
2052 }
2053
2054 static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
2055
2056 static struct attribute *spi_slave_attrs[] = {
2057         &dev_attr_slave.attr,
2058         NULL,
2059 };
2060
2061 static const struct attribute_group spi_slave_group = {
2062         .attrs = spi_slave_attrs,
2063 };
2064
2065 static const struct attribute_group *spi_slave_groups[] = {
2066         &spi_controller_statistics_group,
2067         &spi_slave_group,
2068         NULL,
2069 };
2070
2071 static struct class spi_slave_class = {
2072         .name           = "spi_slave",
2073         .owner          = THIS_MODULE,
2074         .dev_release    = spi_controller_release,
2075         .dev_groups     = spi_slave_groups,
2076 };
2077 #else
2078 extern struct class spi_slave_class;    /* dummy */
2079 #endif
2080
2081 /**
2082  * __spi_alloc_controller - allocate an SPI master or slave controller
2083  * @dev: the controller's parent device, possibly using the platform_bus
2084  * @size: how much zeroed driver-private data to allocate; the pointer to this
2085  *      memory is in the driver_data field of the returned device,
2086  *      accessible with spi_controller_get_devdata().
2087  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2088  *      slave (true) controller
2089  * Context: can sleep
2090  *
2091  * This call is used only by SPI controller drivers, which are the
2092  * only ones directly touching chip registers.  It's how they allocate
2093  * an spi_controller structure, prior to calling spi_register_controller().
2094  *
2095  * This must be called from context that can sleep.
2096  *
2097  * The caller is responsible for assigning the bus number and initializing the
2098  * controller's methods before calling spi_register_controller(); and (after
2099  * errors adding the device) calling spi_controller_put() to prevent a memory
2100  * leak.
2101  *
2102  * Return: the SPI controller structure on success, else NULL.
2103  */
2104 struct spi_controller *__spi_alloc_controller(struct device *dev,
2105                                               unsigned int size, bool slave)
2106 {
2107         struct spi_controller   *ctlr;
2108
2109         if (!dev)
2110                 return NULL;
2111
2112         ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2113         if (!ctlr)
2114                 return NULL;
2115
2116         device_initialize(&ctlr->dev);
2117         ctlr->bus_num = -1;
2118         ctlr->num_chipselect = 1;
2119         ctlr->slave = slave;
2120         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2121                 ctlr->dev.class = &spi_slave_class;
2122         else
2123                 ctlr->dev.class = &spi_master_class;
2124         ctlr->dev.parent = dev;
2125         pm_suspend_ignore_children(&ctlr->dev, true);
2126         spi_controller_set_devdata(ctlr, &ctlr[1]);
2127
2128         return ctlr;
2129 }
2130 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
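/*
 * An illustrative sketch, not part of the original file: drivers normally go
 * through the spi_alloc_master()/spi_alloc_slave() wrappers rather than
 * calling this directly; struct my_hw is hypothetical.
 *
 *	struct spi_controller *ctlr;
 *	struct my_hw *hw;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*hw));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	hw = spi_controller_get_devdata(ctlr);
 */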
2131
2132 #ifdef CONFIG_OF
2133 static int of_spi_register_master(struct spi_controller *ctlr)
2134 {
2135         int nb, i, *cs;
2136         struct device_node *np = ctlr->dev.of_node;
2137
2138         if (!np)
2139                 return 0;
2140
2141         nb = of_gpio_named_count(np, "cs-gpios");
2142         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2143
2144         /* Return error only for an incorrectly formed cs-gpios property */
2145         if (nb == 0 || nb == -ENOENT)
2146                 return 0;
2147         else if (nb < 0)
2148                 return nb;
2149
2150         cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2151                           GFP_KERNEL);
2152         ctlr->cs_gpios = cs;
2153
2154         if (!ctlr->cs_gpios)
2155                 return -ENOMEM;
2156
2157         for (i = 0; i < ctlr->num_chipselect; i++)
2158                 cs[i] = -ENOENT;
2159
2160         for (i = 0; i < nb; i++)
2161                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2162
2163         return 0;
2164 }
2165 #else
2166 static int of_spi_register_master(struct spi_controller *ctlr)
2167 {
2168         return 0;
2169 }
2170 #endif
2171
2172 /**
2173  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2174  * @ctlr: The SPI master to grab GPIO descriptors for
2175  */
2176 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2177 {
2178         int nb, i;
2179         struct gpio_desc **cs;
2180         struct device *dev = &ctlr->dev;
2181
2182         nb = gpiod_count(dev, "cs");
2183         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2184
2185         /* No GPIOs at all is fine, else return the error */
2186         if (nb == 0 || nb == -ENOENT)
2187                 return 0;
2188         else if (nb < 0)
2189                 return nb;
2190
2191         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2192                           GFP_KERNEL);
2193         if (!cs)
2194                 return -ENOMEM;
2195         ctlr->cs_gpiods = cs;
2196
2197         for (i = 0; i < nb; i++) {
2198                 /*
2199                  * Most chipselects are active low, the inverted
2200                  * semantics are handled by special quirks in gpiolib,
2201          * so initializing them to GPIOD_OUT_LOW here means
2202          * "unasserted"; in most cases this will drive the physical
2203                  * line high.
2204                  */
2205                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2206                                                       GPIOD_OUT_LOW);
2207                 if (IS_ERR(cs[i]))
2208                         return PTR_ERR(cs[i]);
2209
2210                 if (cs[i]) {
2211                         /*
2212                          * If we find a CS GPIO, name it after the device and
2213                          * chip select line.
2214                          */
2215                         char *gpioname;
2216
2217                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2218                                                   dev_name(dev), i);
2219                         if (!gpioname)
2220                                 return -ENOMEM;
2221                         gpiod_set_consumer_name(cs[i], gpioname);
2222                 }
2223         }
2224
2225         return 0;
2226 }
2227
2228 static int spi_controller_check_ops(struct spi_controller *ctlr)
2229 {
2230         /*
2231          * The controller may implement only the high-level SPI-memory-like
2232          * operations if it does not support regular SPI transfers, and this is a
2233          * valid use case.
2234          * If ->mem_ops is NULL, we request that at least one of the
2235          * ->transfer_xxx() methods be implemented.
2236          */
2237         if (ctlr->mem_ops) {
2238                 if (!ctlr->mem_ops->exec_op)
2239                         return -EINVAL;
2240         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2241                    !ctlr->transfer_one_message) {
2242                 return -EINVAL;
2243         }
2244
2245         return 0;
2246 }
2247
2248 /**
2249  * spi_register_controller - register SPI master or slave controller
2250  * @ctlr: initialized master, originally from spi_alloc_master() or
2251  *      spi_alloc_slave()
2252  * Context: can sleep
2253  *
2254  * SPI controllers connect to their drivers using some non-SPI bus,
2255  * such as the platform bus.  The final stage of probe() in that code
2256  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2257  *
2258  * SPI controllers use board specific (often SOC specific) bus numbers,
2259  * and board-specific addressing for SPI devices combines those numbers
2260  * with chip select numbers.  Since SPI does not directly support dynamic
2261  * device identification, boards need configuration tables telling which
2262  * chip is at which address.
2263  *
2264  * This must be called from context that can sleep.  It returns zero on
2265  * success, else a negative error code (dropping the controller's refcount).
2266  * After a successful return, the caller is responsible for calling
2267  * spi_unregister_controller().
2268  *
2269  * Return: zero on success, else a negative error code.
2270  */
2271 int spi_register_controller(struct spi_controller *ctlr)
2272 {
2273         struct device           *dev = ctlr->dev.parent;
2274         struct boardinfo        *bi;
2275         int                     status;
2276         int                     id, first_dynamic;
2277
2278         if (!dev)
2279                 return -ENODEV;
2280
2281         /*
2282          * Make sure all necessary hooks are implemented before registering
2283          * the SPI controller.
2284          */
2285         status = spi_controller_check_ops(ctlr);
2286         if (status)
2287                 return status;
2288
2289         /* even if it's just one always-selected device, there must
2290          * be at least one chipselect
2291          */
2292         if (ctlr->num_chipselect == 0)
2293                 return -EINVAL;
2294         if (ctlr->bus_num >= 0) {
2295                 /* devices with a fixed bus num must check in with that num */
2296                 mutex_lock(&board_lock);
2297                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2298                         ctlr->bus_num + 1, GFP_KERNEL);
2299                 mutex_unlock(&board_lock);
2300                 if (WARN(id < 0, "couldn't get idr"))
2301                         return id == -ENOSPC ? -EBUSY : id;
2302                 ctlr->bus_num = id;
2303         } else if (ctlr->dev.of_node) {
2304                 /* allocate dynamic bus number using Linux idr */
2305                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2306                 if (id >= 0) {
2307                         ctlr->bus_num = id;
2308                         mutex_lock(&board_lock);
2309                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2310                                        ctlr->bus_num + 1, GFP_KERNEL);
2311                         mutex_unlock(&board_lock);
2312                         if (WARN(id < 0, "couldn't get idr"))
2313                                 return id == -ENOSPC ? -EBUSY : id;
2314                 }
2315         }
2316         if (ctlr->bus_num < 0) {
2317                 first_dynamic = of_alias_get_highest_id("spi");
2318                 if (first_dynamic < 0)
2319                         first_dynamic = 0;
2320                 else
2321                         first_dynamic++;
2322
2323                 mutex_lock(&board_lock);
2324                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2325                                0, GFP_KERNEL);
2326                 mutex_unlock(&board_lock);
2327                 if (WARN(id < 0, "couldn't get idr"))
2328                         return id;
2329                 ctlr->bus_num = id;
2330         }
2331         INIT_LIST_HEAD(&ctlr->queue);
2332         spin_lock_init(&ctlr->queue_lock);
2333         spin_lock_init(&ctlr->bus_lock_spinlock);
2334         mutex_init(&ctlr->bus_lock_mutex);
2335         mutex_init(&ctlr->io_mutex);
2336         ctlr->bus_lock_flag = 0;
2337         init_completion(&ctlr->xfer_completion);
2338         if (!ctlr->max_dma_len)
2339                 ctlr->max_dma_len = INT_MAX;
2340
2341         /* register the device, then userspace will see it.
2342          * registration fails if the bus ID is in use.
2343          */
2344         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2345
2346         if (!spi_controller_is_slave(ctlr)) {
2347                 if (ctlr->use_gpio_descriptors) {
2348                         status = spi_get_gpio_descs(ctlr);
2349                         if (status)
2350                                 return status;
2351                         /*
2352                          * A controller using GPIO descriptors always
2353                          * supports SPI_CS_HIGH if need be.
2354                          */
2355                         ctlr->mode_bits |= SPI_CS_HIGH;
2356                 } else {
2357                         /* Legacy code path for GPIOs from DT */
2358                         status = of_spi_register_master(ctlr);
2359                         if (status)
2360                                 return status;
2361                 }
2362         }
2363
2364         status = device_add(&ctlr->dev);
2365         if (status < 0) {
2366                 /* free bus id */
2367                 mutex_lock(&board_lock);
2368                 idr_remove(&spi_master_idr, ctlr->bus_num);
2369                 mutex_unlock(&board_lock);
2370                 goto done;
2371         }
2372         dev_dbg(dev, "registered %s %s\n",
2373                         spi_controller_is_slave(ctlr) ? "slave" : "master",
2374                         dev_name(&ctlr->dev));
2375
2376         /*
2377          * If we're using a queued driver, start the queue. Note that we don't
2378          * need the queueing logic if the driver is only supporting high-level
2379          * memory operations.
2380          */
2381         if (ctlr->transfer) {
2382                 dev_info(dev, "controller is unqueued, this is deprecated\n");
2383         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2384                 status = spi_controller_initialize_queue(ctlr);
2385                 if (status) {
2386                         device_del(&ctlr->dev);
2387                         /* free bus id */
2388                         mutex_lock(&board_lock);
2389                         idr_remove(&spi_master_idr, ctlr->bus_num);
2390                         mutex_unlock(&board_lock);
2391                         goto done;
2392                 }
2393         }
2394         /* add statistics */
2395         spin_lock_init(&ctlr->statistics.lock);
2396
2397         mutex_lock(&board_lock);
2398         list_add_tail(&ctlr->list, &spi_controller_list);
2399         list_for_each_entry(bi, &board_list, list)
2400                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2401         mutex_unlock(&board_lock);
2402
2403         /* Register devices from the device tree and ACPI */
2404         of_register_spi_devices(ctlr);
2405         acpi_register_spi_devices(ctlr);
2406 done:
2407         return status;
2408 }
2409 EXPORT_SYMBOL_GPL(spi_register_controller);
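/*
 * An illustrative sketch, not part of the original file, of a typical
 * probe() sequence; my_hw_transfer_one() and the chosen capabilities are
 * hypothetical.
 *
 *	ctlr->bus_num = -1;			(request a dynamic bus number)
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = my_hw_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);	(common error path in drivers)
 */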
2410
2411 static void devm_spi_unregister(struct device *dev, void *res)
2412 {
2413         spi_unregister_controller(*(struct spi_controller **)res);
2414 }
2415
2416 /**
2417  * devm_spi_register_controller - register managed SPI master or slave
2418  *      controller
2419  * @dev:    device managing SPI controller
2420  * @ctlr: initialized controller, originally from spi_alloc_master() or
2421  *      spi_alloc_slave()
2422  * Context: can sleep
2423  *
2424  * Register an SPI controller as with spi_register_controller(), which will
2425  * automatically be unregistered and freed.
2426  *
2427  * Return: zero on success, else a negative error code.
2428  */
2429 int devm_spi_register_controller(struct device *dev,
2430                                  struct spi_controller *ctlr)
2431 {
2432         struct spi_controller **ptr;
2433         int ret;
2434
2435         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2436         if (!ptr)
2437                 return -ENOMEM;
2438
2439         ret = spi_register_controller(ctlr);
2440         if (!ret) {
2441                 *ptr = ctlr;
2442                 devres_add(dev, ptr);
2443         } else {
2444                 devres_free(ptr);
2445         }
2446
2447         return ret;
2448 }
2449 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
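/*
 * An illustrative sketch, not part of the original file: with the managed
 * variant no explicit unregister is needed in remove(), only the allocation
 * reference is dropped on failure.
 *
 *	status = devm_spi_register_controller(&pdev->dev, ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 *	return status;
 */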
2450
2451 static int __unregister(struct device *dev, void *null)
2452 {
2453         spi_unregister_device(to_spi_device(dev));
2454         return 0;
2455 }
2456
2457 /**
2458  * spi_unregister_controller - unregister SPI master or slave controller
2459  * @ctlr: the controller being unregistered
2460  * Context: can sleep
2461  *
2462  * This call is used only by SPI controller drivers, which are the
2463  * only ones directly touching chip registers.
2464  *
2465  * This must be called from context that can sleep.
2466  *
2467  * Note that this function also drops a reference to the controller.
2468  */
2469 void spi_unregister_controller(struct spi_controller *ctlr)
2470 {
2471         struct spi_controller *found;
2472         int id = ctlr->bus_num;
2473         int dummy;
2474
2475         /* First make sure that this controller was ever added */
2476         mutex_lock(&board_lock);
2477         found = idr_find(&spi_master_idr, id);
2478         mutex_unlock(&board_lock);
2479         if (ctlr->queued) {
2480                 if (spi_destroy_queue(ctlr))
2481                         dev_err(&ctlr->dev, "queue remove failed\n");
2482         }
2483         mutex_lock(&board_lock);
2484         list_del(&ctlr->list);
2485         mutex_unlock(&board_lock);
2486
2487         dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2488         device_unregister(&ctlr->dev);
2489         /* free bus id */
2490         mutex_lock(&board_lock);
2491         if (found == ctlr)
2492                 idr_remove(&spi_master_idr, id);
2493         mutex_unlock(&board_lock);
2494 }
2495 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2496
2497 int spi_controller_suspend(struct spi_controller *ctlr)
2498 {
2499         int ret;
2500
2501         /* Basically no-ops for non-queued controllers */
2502         if (!ctlr->queued)
2503                 return 0;
2504
2505         ret = spi_stop_queue(ctlr);
2506         if (ret)
2507                 dev_err(&ctlr->dev, "queue stop failed\n");
2508
2509         return ret;
2510 }
2511 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2512
2513 int spi_controller_resume(struct spi_controller *ctlr)
2514 {
2515         int ret;
2516
2517         if (!ctlr->queued)
2518                 return 0;
2519
2520         ret = spi_start_queue(ctlr);
2521         if (ret)
2522                 dev_err(&ctlr->dev, "queue restart failed\n");
2523
2524         return ret;
2525 }
2526 EXPORT_SYMBOL_GPL(spi_controller_resume);
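/*
 * An illustrative sketch, not part of the original file: controller drivers
 * typically call these from their dev_pm_ops; struct my_hw, hw->clk and the
 * drvdata wiring are hypothetical.
 *
 *	static int my_hw_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct my_hw *hw = spi_controller_get_devdata(ctlr);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);
 *		if (ret)
 *			return ret;
 *		clk_disable_unprepare(hw->clk);
 *		return 0;
 *	}
 */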
2527
2528 static int __spi_controller_match(struct device *dev, const void *data)
2529 {
2530         struct spi_controller *ctlr;
2531         const u16 *bus_num = data;
2532
2533         ctlr = container_of(dev, struct spi_controller, dev);
2534         return ctlr->bus_num == *bus_num;
2535 }
2536
2537 /**
2538  * spi_busnum_to_master - look up master associated with bus_num
2539  * @bus_num: the master's bus number
2540  * Context: can sleep
2541  *
2542  * This call may be used with devices that are registered after
2543  * arch init time.  It returns a refcounted pointer to the relevant
2544  * spi_controller (which the caller must release), or NULL if there is
2545  * no such master registered.
2546  *
2547  * Return: the SPI master structure on success, else NULL.
2548  */
2549 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2550 {
2551         struct device           *dev;
2552         struct spi_controller   *ctlr = NULL;
2553
2554         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2555                                 __spi_controller_match);
2556         if (dev)
2557                 ctlr = container_of(dev, struct spi_controller, dev);
2558         /* reference got in class_find_device */
2559         return ctlr;
2560 }
2561 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
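/*
 * An illustrative sketch, not part of the original file: the lookup returns
 * a refcounted pointer, so callers must drop the reference when done.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		(use ctlr here)
 *		spi_controller_put(ctlr);
 *	}
 */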
2562
2563 /*-------------------------------------------------------------------------*/
2564
2565 /* Core methods for SPI resource management */
2566
2567 /**
2568  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2569  *                 during the processing of a spi_message while using
2570  *                 spi_transfer_one
2571  * @spi:     the spi device for which we allocate memory
2572  * @release: the release code to execute for this resource
2573  * @size:    size to alloc and return
2574  * @gfp:     GFP allocation flags
2575  *
2576  * Return: the pointer to the allocated data
2577  *
2578  * This may get enhanced in the future to allocate from a memory pool
2579  * of the @spi_device or @spi_controller to avoid repeated allocations.
2580  */
2581 void *spi_res_alloc(struct spi_device *spi,
2582                     spi_res_release_t release,
2583                     size_t size, gfp_t gfp)
2584 {
2585         struct spi_res *sres;
2586
2587         sres = kzalloc(sizeof(*sres) + size, gfp);
2588         if (!sres)
2589                 return NULL;
2590
2591         INIT_LIST_HEAD(&sres->entry);
2592         sres->release = release;
2593
2594         return sres->data;
2595 }
2596 EXPORT_SYMBOL_GPL(spi_res_alloc);
2597
2598 /**
2599  * spi_res_free - free an spi resource
2600  * @res: pointer to the custom data of a resource
2601  *
2602  */
2603 void spi_res_free(void *res)
2604 {
2605         struct spi_res *sres = container_of(res, struct spi_res, data);
2606
2607         if (!res)
2608                 return;
2609
2610         WARN_ON(!list_empty(&sres->entry));
2611         kfree(sres);
2612 }
2613 EXPORT_SYMBOL_GPL(spi_res_free);
2614
2615 /**
2616  * spi_res_add - add a spi_res to the spi_message
2617  * @message: the spi message
2618  * @res:     the spi_resource
2619  */
2620 void spi_res_add(struct spi_message *message, void *res)
2621 {
2622         struct spi_res *sres = container_of(res, struct spi_res, data);
2623
2624         WARN_ON(!list_empty(&sres->entry));
2625         list_add_tail(&sres->entry, &message->resources);
2626 }
2627 EXPORT_SYMBOL_GPL(spi_res_add);
2628
2629 /**
2630  * spi_res_release - release all spi resources for this message
2631  * @ctlr:  the @spi_controller
2632  * @message: the @spi_message
2633  */
2634 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2635 {
2636         struct spi_res *res;
2637
2638         while (!list_empty(&message->resources)) {
2639                 res = list_last_entry(&message->resources,
2640                                       struct spi_res, entry);
2641
2642                 if (res->release)
2643                         res->release(ctlr, message, res->data);
2644
2645                 list_del(&res->entry);
2646
2647                 kfree(res);
2648         }
2649 }
2650 EXPORT_SYMBOL_GPL(spi_res_release);
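/*
 * An illustrative sketch, not part of the original file: attaching data to a
 * message so it is released automatically with the message; my_release() and
 * struct my_state are hypothetical.
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		(undo whatever was set up around res)
 *	}
 *
 *	struct my_state *st = spi_res_alloc(msg->spi, my_release,
 *					    sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);
 */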
2651
2652 /*-------------------------------------------------------------------------*/
2653
2654 /* Core methods for spi_message alterations */
2655
2656 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2657                                             struct spi_message *msg,
2658                                             void *res)
2659 {
2660         struct spi_replaced_transfers *rxfer = res;
2661         size_t i;
2662
2663         /* call extra callback if requested */
2664         if (rxfer->release)
2665                 rxfer->release(ctlr, msg, res);
2666
2667         /* insert replaced transfers back into the message */
2668         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2669
2670         /* remove the formerly inserted entries */
2671         for (i = 0; i < rxfer->inserted; i++)
2672                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2673 }
2674
2675 /**
2676  * spi_replace_transfers - replace transfers with several transfers
2677  *                         and register change with spi_message.resources
2678  * @msg:           the spi_message we work upon
2679  * @xfer_first:    the first spi_transfer we want to replace
2680  * @remove:        number of transfers to remove
2681  * @insert:        the number of transfers we want to insert instead
2682  * @release:       extra release code necessary in some circumstances
2683  * @extradatasize: extra data to allocate (with alignment guarantees
2684  *                 of struct @spi_transfer)
2685  * @gfp:           gfp flags
2686  *
2687  * Return: pointer to @spi_replaced_transfers,
2688  *          PTR_ERR(...) in case of errors.
2689  */
2690 struct spi_replaced_transfers *spi_replace_transfers(
2691         struct spi_message *msg,
2692         struct spi_transfer *xfer_first,
2693         size_t remove,
2694         size_t insert,
2695         spi_replaced_release_t release,
2696         size_t extradatasize,
2697         gfp_t gfp)
2698 {
2699         struct spi_replaced_transfers *rxfer;
2700         struct spi_transfer *xfer;
2701         size_t i;
2702
2703         /* allocate the structure using spi_res */
2704         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2705                               insert * sizeof(struct spi_transfer)
2706                               + sizeof(struct spi_replaced_transfers)
2707                               + extradatasize,
2708                               gfp);
2709         if (!rxfer)
2710                 return ERR_PTR(-ENOMEM);
2711
2712         /* the release code to invoke before running the generic release */
2713         rxfer->release = release;
2714
2715         /* assign extradata */
2716         if (extradatasize)
2717                 rxfer->extradata =
2718                         &rxfer->inserted_transfers[insert];
2719
2720         /* init the replaced_transfers list */
2721         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2722
2723         /* assign the list_entry after which we should reinsert
2724          * the @replaced_transfers - it may be spi_message.transfers!
2725          */
2726         rxfer->replaced_after = xfer_first->transfer_list.prev;
2727
2728         /* remove the requested number of transfers */
2729         for (i = 0; i < remove; i++) {
2730                 /* if the entry after replaced_after is msg->transfers,
2731                  * then we have been requested to remove more transfers
2732                  * than are in the list
2733                  */
2734                 if (rxfer->replaced_after->next == &msg->transfers) {
2735                         dev_err(&msg->spi->dev,
2736                                 "requested to remove more spi_transfers than are available\n");
2737                         /* insert replaced transfers back into the message */
2738                         list_splice(&rxfer->replaced_transfers,
2739                                     rxfer->replaced_after);
2740
2741                         /* free the spi_replace_transfer structure */
2742                         spi_res_free(rxfer);
2743
2744                         /* and return with an error */
2745                         return ERR_PTR(-EINVAL);
2746                 }
2747
2748                 /* remove the entry after replaced_after from list of
2749                  * transfers and add it to list of replaced_transfers
2750                  */
2751                 list_move_tail(rxfer->replaced_after->next,
2752                                &rxfer->replaced_transfers);
2753         }
2754
2755         /* create copies of the given xfer with identical settings
2756          * based on the first transfer to get removed
2757          */
2758         for (i = 0; i < insert; i++) {
2759                 /* we need to run in reverse order */
2760                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2761
2762                 /* copy all spi_transfer data */
2763                 memcpy(xfer, xfer_first, sizeof(*xfer));
2764
2765                 /* add to list */
2766                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2767
2768                 /* clear cs_change and delay_usecs for all but the last */
2769                 if (i) {
2770                         xfer->cs_change = false;
2771                         xfer->delay_usecs = 0;
2772                 }
2773         }
2774
2775         /* set up inserted */
2776         rxfer->inserted = insert;
2777
2778         /* and register it with spi_res/spi_message */
2779         spi_res_add(msg, rxfer);
2780
2781         return rxfer;
2782 }
2783 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2784
2785 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2786                                         struct spi_message *msg,
2787                                         struct spi_transfer **xferp,
2788                                         size_t maxsize,
2789                                         gfp_t gfp)
2790 {
2791         struct spi_transfer *xfer = *xferp, *xfers;
2792         struct spi_replaced_transfers *srt;
2793         size_t offset;
2794         size_t count, i;
2795
2796         /* calculate how many we have to replace */
2797         count = DIV_ROUND_UP(xfer->len, maxsize);
2798
2799         /* create replacement */
2800         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2801         if (IS_ERR(srt))
2802                 return PTR_ERR(srt);
2803         xfers = srt->inserted_transfers;
2804
2805         /* now handle each of those newly inserted spi_transfers
2806          * note that the replacement spi_transfers are all preset
2807          * to the same values as *xferp, so tx_buf, rx_buf and len
2808          * are all identical (as well as most others)
2809          * so we just have to fix up len and the pointers.
2810          *
2811          * this also includes support for the deprecated
2812          * spi_message.is_dma_mapped interface
2813          */
2814
2815         /* the first transfer just needs the length modified, so we
2816          * run it outside the loop
2817          */
2818         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2819
2820         /* all the others need rx_buf/tx_buf also set */
2821         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2822                 /* update rx_buf, tx_buf and dma */
2823                 if (xfers[i].rx_buf)
2824                         xfers[i].rx_buf += offset;
2825                 if (xfers[i].rx_dma)
2826                         xfers[i].rx_dma += offset;
2827                 if (xfers[i].tx_buf)
2828                         xfers[i].tx_buf += offset;
2829                 if (xfers[i].tx_dma)
2830                         xfers[i].tx_dma += offset;
2831
2832                 /* update length */
2833                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2834         }
2835
2836         /* we set up xferp to the last entry we have inserted,
2837          * so that we skip those already split transfers
2838          */
2839         *xferp = &xfers[count - 1];
2840
2841         /* increment statistics counters */
2842         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2843                                        transfers_split_maxsize);
2844         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2845                                        transfers_split_maxsize);
2846
2847         return 0;
2848 }
2849
2850 /**
2851  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2852  *                              when an individual transfer exceeds a
2853  *                              certain size
2854  * @ctlr:    the @spi_controller for this transfer
2855  * @msg:   the @spi_message to transform
2856  * @maxsize:  the maximum length an individual transfer may have before it is split
2857  * @gfp: GFP allocation flags
2858  *
2859  * Return: status of transformation
2860  */
2861 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2862                                 struct spi_message *msg,
2863                                 size_t maxsize,
2864                                 gfp_t gfp)
2865 {
2866         struct spi_transfer *xfer;
2867         int ret;
2868
2869         /* iterate over the transfer_list,
2870          * but note that xfer is advanced to the last transfer inserted
2871          * to avoid checking sizes again unnecessarily (also, xfer may
2872          * belong to a different list by the time the replacement has
2873          * happened)
2874          */
2875         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2876                 if (xfer->len > maxsize) {
2877                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2878                                                            maxsize, gfp);
2879                         if (ret)
2880                                 return ret;
2881                 }
2882         }
2883
2884         return 0;
2885 }
2886 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
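/*
 * An illustrative sketch, not part of the original file: a controller whose
 * hardware cannot move more than a fixed number of bytes per transfer can
 * split oversized transfers from its prepare_message() hook; MY_HW_MAX_LEN
 * is hypothetical.
 *
 *	static int my_hw_prepare_message(struct spi_controller *ctlr,
 *					 struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, MY_HW_MAX_LEN,
 *						   GFP_KERNEL);
 *	}
 */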
2887
2888 /*-------------------------------------------------------------------------*/
2889
2890 /* Core methods for SPI controller protocol drivers.  Some of the
2891  * other core methods are currently defined as inline functions.
2892  */
2893
2894 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2895                                         u8 bits_per_word)
2896 {
2897         if (ctlr->bits_per_word_mask) {
2898                 /* Only 32 bits fit in the mask */
2899                 if (bits_per_word > 32)
2900                         return -EINVAL;
2901                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2902                         return -EINVAL;
2903         }
2904
2905         return 0;
2906 }
2907
2908 /**
2909  * spi_setup - setup SPI mode and clock rate
2910  * @spi: the device whose settings are being modified
2911  * Context: can sleep, and no requests are queued to the device
2912  *
2913  * SPI protocol drivers may need to update the transfer mode if the
2914  * device doesn't work with its default.  They may likewise need
2915  * to update clock rates or word sizes from initial values.  This function
2916  * changes those settings, and must be called from a context that can sleep.
2917  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2918  * effect the next time the device is selected and data is transferred to
2919  * or from it.  When this function returns, the spi device is deselected.
2920  *
2921  * Note that this call will fail if the protocol driver specifies an option
2922  * that the underlying controller or its driver does not support.  For
2923  * example, not all hardware supports wire transfers using nine bit words,
2924  * LSB-first wire encoding, or active-high chipselects.
2925  *
2926  * Return: zero on success, else a negative error code.
2927  */
2928 int spi_setup(struct spi_device *spi)
2929 {
2930         unsigned        bad_bits, ugly_bits;
2931         int             status;
2932
2933         /* Check mode to prevent DUAL and QUAD being set simultaneously.
2934          */
2935         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2936                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2937                 dev_err(&spi->dev,
2938                 "setup: can not select dual and quad at the same time\n");
2939                 return -EINVAL;
2940         }
2941         /* In SPI_3WIRE mode, DUAL, QUAD, and OCTAL are forbidden.
2942          */
2943         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2944                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2945                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
2946                 return -EINVAL;
2947         /* Help drivers fail *cleanly* when they need options
2948          * that aren't supported with their current controller.
2949          * SPI_CS_WORD has a fallback software implementation,
2950          * so it is ignored here.
2951          */
2952         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
2953         /* Nothing prevents us from working with an active-high CS
2954          * when it is driven by a GPIO.
2955          */
2956         if (gpio_is_valid(spi->cs_gpio))
2957                 bad_bits &= ~SPI_CS_HIGH;
2958         ugly_bits = bad_bits &
2959                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2960                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
2961         if (ugly_bits) {
2962                 dev_warn(&spi->dev,
2963                          "setup: ignoring unsupported mode bits %x\n",
2964                          ugly_bits);
2965                 spi->mode &= ~ugly_bits;
2966                 bad_bits &= ~ugly_bits;
2967         }
2968         if (bad_bits) {
2969                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2970                         bad_bits);
2971                 return -EINVAL;
2972         }
2973
2974         if (!spi->bits_per_word)
2975                 spi->bits_per_word = 8;
2976
2977         status = __spi_validate_bits_per_word(spi->controller,
2978                                               spi->bits_per_word);
2979         if (status)
2980                 return status;
2981
2982         if (!spi->max_speed_hz)
2983                 spi->max_speed_hz = spi->controller->max_speed_hz;
2984
2985         if (spi->controller->setup)
2986                 status = spi->controller->setup(spi);
2987
2988         spi_set_cs(spi, false);
2989
2990         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2991                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2992                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2993                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2994                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2995                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2996                         spi->bits_per_word, spi->max_speed_hz,
2997                         status);
2998
2999         return status;
3000 }
3001 EXPORT_SYMBOL_GPL(spi_setup);
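
/*
 * A minimal sketch of the usage described above (the probe function and
 * all values are hypothetical): a protocol driver overrides only the
 * defaults it cares about, then calls spi_setup() once before queueing
 * any transfers.
 */
static int example_chip_probe(struct spi_device *spi)
{
        spi->mode = SPI_MODE_3;         /* CPOL = 1, CPHA = 1 */
        spi->bits_per_word = 16;
        spi->max_speed_hz = 1000000;    /* cap the clock at 1 MHz */

        /* fails cleanly if the controller can't honour one of the options */
        return spi_setup(spi);
}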
3002
3003 /**
3004  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3005  * @spi: the device that requires specific CS timing configuration
3006  * @setup: CS setup time in terms of clock count
3007  * @hold: CS hold time in terms of clock count
3008  * @inactive_dly: CS inactive delay between transfers in terms of clock count
3009  */
3010 void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
3011                        u8 inactive_dly)
3012 {
3013         if (spi->controller->set_cs_timing)
3014                 spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
3015 }
3016 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
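
/*
 * A minimal sketch with hypothetical values: request two clock cycles of
 * CS setup, four of hold, and eight of inactive delay between transfers.
 * Controllers lacking a ->set_cs_timing hook silently ignore the request,
 * so callers must not rely on the timing actually being applied.
 */
static void example_tune_cs(struct spi_device *spi)
{
        spi_set_cs_timing(spi, 2, 4, 8);
}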
3017
3018 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3019 {
3020         struct spi_controller *ctlr = spi->controller;
3021         struct spi_transfer *xfer;
3022         int w_size;
3023
3024         if (list_empty(&message->transfers))
3025                 return -EINVAL;
3026
3027         /* If an SPI controller does not support toggling the CS line on each
3028          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3029          * for the CS line, we can emulate the CS-per-word hardware function by
3030          * splitting transfers into one-word transfers and ensuring that
3031          * cs_change is set for each transfer.
3032          */
3033         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3034                                           spi->cs_gpiod ||
3035                                           gpio_is_valid(spi->cs_gpio))) {
3036                 size_t maxsize;
3037                 int ret;
3038
3039                 maxsize = (spi->bits_per_word + 7) / 8;
3040
3041                 /* spi_split_transfers_maxsize() requires message->spi */
3042                 message->spi = spi;
3043
3044                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3045                                                   GFP_KERNEL);
3046                 if (ret)
3047                         return ret;
3048
3049                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3050                         /* don't change cs_change on the last entry in the list */
3051                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3052                                 break;
3053                         xfer->cs_change = 1;
3054                 }
3055         }
3056
3057         /* Half-duplex links include the original MicroWire, and ones with
3058          * only one data pin like SPI_3WIRE (switches direction) or where
3059          * either MOSI or MISO is missing.  They can also be caused by
3060          * software limitations.
3061          */
3062         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3063             (spi->mode & SPI_3WIRE)) {
3064                 unsigned flags = ctlr->flags;
3065
3066                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3067                         if (xfer->rx_buf && xfer->tx_buf)
3068                                 return -EINVAL;
3069                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3070                                 return -EINVAL;
3071                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3072                                 return -EINVAL;
3073                 }
3074         }
3075
3076         /*
3077          * Set transfer bits_per_word and max speed to the spi device
3078          * defaults if they are not set for this transfer.
3079          * Set transfer tx_nbits and rx_nbits to the single-transfer
3080          * default (SPI_NBITS_SINGLE) if they are not set for this transfer.
3081          * Ensure transfer word_delay is at least as long as that required
3082          * by the device itself.
3083          */
3084         message->frame_length = 0;
3085         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3086                 message->frame_length += xfer->len;
3087                 if (!xfer->bits_per_word)
3088                         xfer->bits_per_word = spi->bits_per_word;
3089
3090                 if (!xfer->speed_hz)
3091                         xfer->speed_hz = spi->max_speed_hz;
3092
3093                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3094                         xfer->speed_hz = ctlr->max_speed_hz;
3095
3096                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3097                         return -EINVAL;
3098
3099                 /*
3100                  * The SPI transfer length must be a multiple of the SPI
3101                  * word size, rounded up to a power-of-two number of bytes.
3102                  */
3103                 if (xfer->bits_per_word <= 8)
3104                         w_size = 1;
3105                 else if (xfer->bits_per_word <= 16)
3106                         w_size = 2;
3107                 else
3108                         w_size = 4;
3109
3110                 /* No partial transfers accepted */
3111                 if (xfer->len % w_size)
3112                         return -EINVAL;
3113
3114                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3115                     xfer->speed_hz < ctlr->min_speed_hz)
3116                         return -EINVAL;
3117
3118                 if (xfer->tx_buf && !xfer->tx_nbits)
3119                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3120                 if (xfer->rx_buf && !xfer->rx_nbits)
3121                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3122                 /* Check transfer tx/rx_nbits:
3123                  * 1. the value must be one of single, dual, or quad
3124                  * 2. tx/rx_nbits must match the mode set in the spi_device
3125                  */
3126                 if (xfer->tx_buf) {
3127                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3128                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3129                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3130                                 return -EINVAL;
3131                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3132                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3133                                 return -EINVAL;
3134                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3135                                 !(spi->mode & SPI_TX_QUAD))
3136                                 return -EINVAL;
3137                 }
3138                 /* check transfer rx_nbits */
3139                 if (xfer->rx_buf) {
3140                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3141                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3142                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3143                                 return -EINVAL;
3144                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3145                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3146                                 return -EINVAL;
3147                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3148                                 !(spi->mode & SPI_RX_QUAD))
3149                                 return -EINVAL;
3150                 }
3151
3152                 if (xfer->word_delay_usecs < spi->word_delay_usecs)
3153                         xfer->word_delay_usecs = spi->word_delay_usecs;
3154         }
3155
3156         message->status = -EINPROGRESS;
3157
3158         return 0;
3159 }
3160
3161 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3162 {
3163         struct spi_controller *ctlr = spi->controller;
3164
3165         /*
3166          * Some controllers do not support doing regular SPI transfers. Return
3167          * ENOTSUPP when this is the case.
3168          */
3169         if (!ctlr->transfer)
3170                 return -ENOTSUPP;
3171
3172         message->spi = spi;
3173
3174         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3175         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3176
3177         trace_spi_message_submit(message);
3178
3179         return ctlr->transfer(spi, message);
3180 }
3181
3182 /**
3183  * spi_async - asynchronous SPI transfer
3184  * @spi: device with which data will be exchanged
3185  * @message: describes the data transfers, including completion callback
3186  * Context: any (irqs may be blocked, etc)
3187  *
3188  * This call may be used in hard IRQ and other contexts which can't sleep,
3189  * as well as from task contexts which can sleep.
3190  *
3191  * The completion callback is invoked in a context which can't sleep.
3192  * Before that invocation, the value of message->status is undefined.
3193  * When the callback is issued, message->status holds either zero (to
3194  * indicate complete success) or a negative error code.  After that
3195  * callback returns, the driver which issued the transfer request may
3196  * deallocate the associated memory; it's no longer in use by any SPI
3197  * core or controller driver code.
3198  *
3199  * Note that although all messages to a spi_device are handled in
3200  * FIFO order, messages may go to different devices in other orders.
3201  * Some devices might be higher priority, or have various "hard" access
3202  * time requirements, for example.
3203  *
3204  * On detection of any fault during the transfer, processing of
3205  * the entire message is aborted, and the device is deselected.
3206  * Until returning from the associated message completion callback,
3207  * no other spi_message queued to that device will be processed.
3208  * (This rule applies equally to all the synchronous transfer calls,
3209  * which are wrappers around this core asynchronous primitive.)
3210  *
3211  * Return: zero on success, else a negative error code.
3212  */
3213 int spi_async(struct spi_device *spi, struct spi_message *message)
3214 {
3215         struct spi_controller *ctlr = spi->controller;
3216         int ret;
3217         unsigned long flags;
3218
3219         ret = __spi_validate(spi, message);
3220         if (ret != 0)
3221                 return ret;
3222
3223         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3224
3225         if (ctlr->bus_lock_flag)
3226                 ret = -EBUSY;
3227         else
3228                 ret = __spi_async(spi, message);
3229
3230         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3231
3232         return ret;
3233 }
3234 EXPORT_SYMBOL_GPL(spi_async);
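
/*
 * A minimal sketch of the asynchronous pattern documented above; the
 * context struct, callback, and buffer size are assumptions.  Everything
 * referenced by the message must stay allocated until the completion
 * callback has run.
 */
struct example_async_ctx {
        struct spi_message msg;
        struct spi_transfer xfer;
        u8 rx[4];
};

static void example_complete(void *context)
{
        struct example_async_ctx *ctx = context;

        /* runs in a context that can't sleep; msg.status is now valid */
        pr_info("async read finished, status %d\n", ctx->msg.status);
}

static int example_start_read(struct spi_device *spi,
                              struct example_async_ctx *ctx)
{
        spi_message_init(&ctx->msg);
        memset(&ctx->xfer, 0, sizeof(ctx->xfer));
        ctx->xfer.rx_buf = ctx->rx;
        ctx->xfer.len = sizeof(ctx->rx);
        spi_message_add_tail(&ctx->xfer, &ctx->msg);
        ctx->msg.complete = example_complete;
        ctx->msg.context = ctx;

        return spi_async(spi, &ctx->msg);       /* may be called from IRQ */
}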
3235
3236 /**
3237  * spi_async_locked - version of spi_async with exclusive bus usage
3238  * @spi: device with which data will be exchanged
3239  * @message: describes the data transfers, including completion callback
3240  * Context: any (irqs may be blocked, etc)
3241  *
3242  * This call may be used in hard IRQ and other contexts which can't sleep,
3243  * as well as from task contexts which can sleep.
3244  *
3245  * The completion callback is invoked in a context which can't sleep.
3246  * Before that invocation, the value of message->status is undefined.
3247  * When the callback is issued, message->status holds either zero (to
3248  * indicate complete success) or a negative error code.  After that
3249  * callback returns, the driver which issued the transfer request may
3250  * deallocate the associated memory; it's no longer in use by any SPI
3251  * core or controller driver code.
3252  *
3253  * Note that although all messages to a spi_device are handled in
3254  * FIFO order, messages may go to different devices in other orders.
3255  * Some devices might be higher priority, or have various "hard" access
3256  * time requirements, for example.
3257  *
3258  * On detection of any fault during the transfer, processing of
3259  * the entire message is aborted, and the device is deselected.
3260  * Until returning from the associated message completion callback,
3261  * no other spi_message queued to that device will be processed.
3262  * (This rule applies equally to all the synchronous transfer calls,
3263  * which are wrappers around this core asynchronous primitive.)
3264  *
3265  * Return: zero on success, else a negative error code.
3266  */
3267 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3268 {
3269         struct spi_controller *ctlr = spi->controller;
3270         int ret;
3271         unsigned long flags;
3272
3273         ret = __spi_validate(spi, message);
3274         if (ret != 0)
3275                 return ret;
3276
3277         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3278
3279         ret = __spi_async(spi, message);
3280
3281         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3282
3283         return ret;
3284
3285 }
3286 EXPORT_SYMBOL_GPL(spi_async_locked);
3287
3288 /*-------------------------------------------------------------------------*/
3289
3290 /* Utility methods for SPI protocol drivers, layered on
3291  * top of the core.  Some other utility methods are defined as
3292  * inline functions.
3293  */
3294
3295 static void spi_complete(void *arg)
3296 {
3297         complete(arg);
3298 }
3299
3300 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3301 {
3302         DECLARE_COMPLETION_ONSTACK(done);
3303         int status;
3304         struct spi_controller *ctlr = spi->controller;
3305         unsigned long flags;
3306
3307         status = __spi_validate(spi, message);
3308         if (status != 0)
3309                 return status;
3310
3311         message->complete = spi_complete;
3312         message->context = &done;
3313         message->spi = spi;
3314
3315         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3316         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3317
3318         /* If we're not using the legacy transfer method then we will
3319          * try to transfer in the calling context, so special-case that.
3320          * This code would be less tricky if we could remove the
3321          * support for driver-implemented message queues.
3322          */
3323         if (ctlr->transfer == spi_queued_transfer) {
3324                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3325
3326                 trace_spi_message_submit(message);
3327
3328                 status = __spi_queued_transfer(spi, message, false);
3329
3330                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3331         } else {
3332                 status = spi_async_locked(spi, message);
3333         }
3334
3335         if (status == 0) {
3336                 /* Push out the messages in the calling context if we
3337                  * can.
3338                  */
3339                 if (ctlr->transfer == spi_queued_transfer) {
3340                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3341                                                        spi_sync_immediate);
3342                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3343                                                        spi_sync_immediate);
3344                         __spi_pump_messages(ctlr, false);
3345                 }
3346
3347                 wait_for_completion(&done);
3348                 status = message->status;
3349         }
3350         message->context = NULL;
3351         return status;
3352 }
3353
3354 /**
3355  * spi_sync - blocking/synchronous SPI data transfers
3356  * @spi: device with which data will be exchanged
3357  * @message: describes the data transfers
3358  * Context: can sleep
3359  *
3360  * This call may only be used from a context that may sleep.  The sleep
3361  * is non-interruptible, and has no timeout.  Low-overhead controller
3362  * drivers may DMA directly into and out of the message buffers.
3363  *
3364  * Note that the SPI device's chip select is active during the message,
3365  * and then is normally disabled between messages.  Drivers for some
3366  * frequently-used devices may want to minimize costs of selecting a chip,
3367  * by leaving it selected in anticipation that the next message will go
3368  * to the same chip.  (That may increase power usage.)
3369  *
3370  * Also, the caller is guaranteeing that the memory associated with the
3371  * message will not be freed before this call returns.
3372  *
3373  * Return: zero on success, else a negative error code.
3374  */
3375 int spi_sync(struct spi_device *spi, struct spi_message *message)
3376 {
3377         int ret;
3378
3379         mutex_lock(&spi->controller->bus_lock_mutex);
3380         ret = __spi_sync(spi, message);
3381         mutex_unlock(&spi->controller->bus_lock_mutex);
3382
3383         return ret;
3384 }
3385 EXPORT_SYMBOL_GPL(spi_sync);
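
/*
 * A minimal sketch of a basic synchronous full-duplex exchange; the helper
 * name is hypothetical.  The spi_sync_transfer() wrapper in
 * <linux/spi/spi.h> packages this same init/add_tail/sync pattern.
 */
static int example_xfer_sync(struct spi_device *spi, const void *tx,
                             void *rx, size_t len)
{
        struct spi_transfer xfer = {
                .tx_buf = tx,
                .rx_buf = rx,
                .len    = len,
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfer, &msg);

        return spi_sync(spi, &msg);     /* blocks until the message completes */
}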
3386
3387 /**
3388  * spi_sync_locked - version of spi_sync with exclusive bus usage
3389  * @spi: device with which data will be exchanged
3390  * @message: describes the data transfers
3391  * Context: can sleep
3392  *
3393  * This call may only be used from a context that may sleep.  The sleep
3394  * is non-interruptible, and has no timeout.  Low-overhead controller
3395  * drivers may DMA directly into and out of the message buffers.
3396  *
3397  * This call should be used by drivers that require exclusive access to the
3398  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3399  * be released by a spi_bus_unlock call when the exclusive access is over.
3400  *
3401  * Return: zero on success, else a negative error code.
3402  */
3403 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3404 {
3405         return __spi_sync(spi, message);
3406 }
3407 EXPORT_SYMBOL_GPL(spi_sync_locked);
3408
3409 /**
3410  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3411  * @ctlr: SPI bus master that should be locked for exclusive bus access
3412  * Context: can sleep
3413  *
3414  * This call may only be used from a context that may sleep.  The sleep
3415  * is non-interruptible, and has no timeout.
3416  *
3417  * This call should be used by drivers that require exclusive access to the
3418  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3419  * exclusive access is over. Data transfer must be done by spi_sync_locked
3420  * and spi_async_locked calls when the SPI bus lock is held.
3421  *
3422  * Return: always zero.
3423  */
3424 int spi_bus_lock(struct spi_controller *ctlr)
3425 {
3426         unsigned long flags;
3427
3428         mutex_lock(&ctlr->bus_lock_mutex);
3429
3430         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3431         ctlr->bus_lock_flag = 1;
3432         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3433
3434         /* mutex remains locked until spi_bus_unlock is called */
3435
3436         return 0;
3437 }
3438 EXPORT_SYMBOL_GPL(spi_bus_lock);
3439
3440 /**
3441  * spi_bus_unlock - release the lock for exclusive SPI bus usage
3442  * @ctlr: SPI bus master that was locked for exclusive bus access
3443  * Context: can sleep
3444  *
3445  * This call may only be used from a context that may sleep.  The sleep
3446  * is non-interruptible, and has no timeout.
3447  *
3448  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3449  * call.
3450  *
3451  * Return: always zero.
3452  */
3453 int spi_bus_unlock(struct spi_controller *ctlr)
3454 {
3455         ctlr->bus_lock_flag = 0;
3456
3457         mutex_unlock(&ctlr->bus_lock_mutex);
3458
3459         return 0;
3460 }
3461 EXPORT_SYMBOL_GPL(spi_bus_unlock);
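
/*
 * A minimal sketch of the lock/transfer/unlock sequence the two calls
 * above exist for; both messages are assumed to be fully built by the
 * caller.  No message from any other device on this bus can be processed
 * between the lock and unlock calls.
 */
static int example_atomic_pair(struct spi_device *spi,
                               struct spi_message *first,
                               struct spi_message *second)
{
        int ret;

        spi_bus_lock(spi->controller);

        ret = spi_sync_locked(spi, first);
        if (!ret)
                ret = spi_sync_locked(spi, second);

        spi_bus_unlock(spi->controller);

        return ret;
}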
3462
3463 /* portable code must never pass more than 32 bytes */
3464 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3465
3466 static u8       *buf;
3467
3468 /**
3469  * spi_write_then_read - SPI synchronous write followed by read
3470  * @spi: device with which data will be exchanged
3471  * @txbuf: data to be written (need not be dma-safe)
3472  * @n_tx: size of txbuf, in bytes
3473  * @rxbuf: buffer into which data will be read (need not be dma-safe)
3474  * @n_rx: size of rxbuf, in bytes
3475  * Context: can sleep
3476  *
3477  * This performs a half-duplex MicroWire-style transaction with the
3478  * device, sending txbuf and then reading rxbuf.  The return value
3479  * is zero for success, else a negative errno status code.
3480  * This call may only be used from a context that may sleep.
3481  *
3482  * Parameters to this routine are always copied using a small buffer;
3483  * portable code should never use this for more than 32 bytes.
3484  * Performance-sensitive or bulk transfer code should instead use
3485  * spi_{async,sync}() calls with dma-safe buffers.
3486  *
3487  * Return: zero on success, else a negative error code.
3488  */
3489 int spi_write_then_read(struct spi_device *spi,
3490                 const void *txbuf, unsigned n_tx,
3491                 void *rxbuf, unsigned n_rx)
3492 {
3493         static DEFINE_MUTEX(lock);
3494
3495         int                     status;
3496         struct spi_message      message;
3497         struct spi_transfer     x[2];
3498         u8                      *local_buf;
3499
3500         /* Use the preallocated DMA-safe buffer if we can.  We can't
3501          * avoid copying here (it's purely a convenience), but we can
3502          * keep heap costs out of the hot path unless someone else is
3503          * using the preallocated buffer or the transfer is too large.
3504          */
3505         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3506                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3507                                     GFP_KERNEL | GFP_DMA);
3508                 if (!local_buf)
3509                         return -ENOMEM;
3510         } else {
3511                 local_buf = buf;
3512         }
3513
3514         spi_message_init(&message);
3515         memset(x, 0, sizeof(x));
3516         if (n_tx) {
3517                 x[0].len = n_tx;
3518                 spi_message_add_tail(&x[0], &message);
3519         }
3520         if (n_rx) {
3521                 x[1].len = n_rx;
3522                 spi_message_add_tail(&x[1], &message);
3523         }
3524
3525         memcpy(local_buf, txbuf, n_tx);
3526         x[0].tx_buf = local_buf;
3527         x[1].rx_buf = local_buf + n_tx;
3528
3529         /* do the i/o */
3530         status = spi_sync(spi, &message);
3531         if (status == 0)
3532                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3533
3534         if (x[0].tx_buf == buf)
3535                 mutex_unlock(&lock);
3536         else
3537                 kfree(local_buf);
3538
3539         return status;
3540 }
3541 EXPORT_SYMBOL_GPL(spi_write_then_read);
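
/*
 * A minimal sketch of the register-read idiom this helper enables; the
 * one-byte command format belongs to a hypothetical chip, not to the SPI
 * core itself.
 */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
        /* clock out the register address, then clock in one response byte */
        return spi_write_then_read(spi, &reg, 1, val, 1);
}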
3542
3543 /*-------------------------------------------------------------------------*/
3544
3545 #if IS_ENABLED(CONFIG_OF)
3546 static int __spi_of_device_match(struct device *dev, void *data)
3547 {
3548         return dev->of_node == data;
3549 }
3550
3551 /* must call put_device() when done with the returned spi_device */
3552 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3553 {
3554         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3555                                                 __spi_of_device_match);
3556         return dev ? to_spi_device(dev) : NULL;
3557 }
3558 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3559 #endif /* IS_ENABLED(CONFIG_OF) */
3560
3561 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3562 static int __spi_of_controller_match(struct device *dev, const void *data)
3563 {
3564         return dev->of_node == data;
3565 }
3566
3567 /* SPI controllers are not on the spi_bus, so we find them another way */
3568 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3569 {
3570         struct device *dev;
3571
3572         dev = class_find_device(&spi_master_class, NULL, node,
3573                                 __spi_of_controller_match);
3574         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3575                 dev = class_find_device(&spi_slave_class, NULL, node,
3576                                         __spi_of_controller_match);
3577         if (!dev)
3578                 return NULL;
3579
3580         /* reference obtained in class_find_device */
3581         return container_of(dev, struct spi_controller, dev);
3582 }
3583
3584 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3585                          void *arg)
3586 {
3587         struct of_reconfig_data *rd = arg;
3588         struct spi_controller *ctlr;
3589         struct spi_device *spi;
3590
3591         switch (of_reconfig_get_state_change(action, arg)) {
3592         case OF_RECONFIG_CHANGE_ADD:
3593                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3594                 if (ctlr == NULL)
3595                         return NOTIFY_OK;       /* not for us */
3596
3597                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3598                         put_device(&ctlr->dev);
3599                         return NOTIFY_OK;
3600                 }
3601
3602                 spi = of_register_spi_device(ctlr, rd->dn);
3603                 put_device(&ctlr->dev);
3604
3605                 if (IS_ERR(spi)) {
3606                         pr_err("%s: failed to create SPI device for '%pOF'\n",
3607                                         __func__, rd->dn);
3608                         of_node_clear_flag(rd->dn, OF_POPULATED);
3609                         return notifier_from_errno(PTR_ERR(spi));
3610                 }
3611                 break;
3612
3613         case OF_RECONFIG_CHANGE_REMOVE:
3614                 /* already depopulated? */
3615                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3616                         return NOTIFY_OK;
3617
3618                 /* find our device by node */
3619                 spi = of_find_spi_device_by_node(rd->dn);
3620                 if (spi == NULL)
3621                         return NOTIFY_OK;       /* no? not meant for us */
3622
3623                 /* unregister takes one ref away */
3624                 spi_unregister_device(spi);
3625
3626                 /* and drop the reference taken by the find */
3627                 put_device(&spi->dev);
3628                 break;
3629         }
3630
3631         return NOTIFY_OK;
3632 }
3633
3634 static struct notifier_block spi_of_notifier = {
3635         .notifier_call = of_spi_notify,
3636 };
3637 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3638 extern struct notifier_block spi_of_notifier;
3639 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3640
3641 #if IS_ENABLED(CONFIG_ACPI)
3642 static int spi_acpi_controller_match(struct device *dev, const void *data)
3643 {
3644         return ACPI_COMPANION(dev->parent) == data;
3645 }
3646
3647 static int spi_acpi_device_match(struct device *dev, void *data)
3648 {
3649         return ACPI_COMPANION(dev) == data;
3650 }
3651
3652 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3653 {
3654         struct device *dev;
3655
3656         dev = class_find_device(&spi_master_class, NULL, adev,
3657                                 spi_acpi_controller_match);
3658         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3659                 dev = class_find_device(&spi_slave_class, NULL, adev,
3660                                         spi_acpi_controller_match);
3661         if (!dev)
3662                 return NULL;
3663
3664         return container_of(dev, struct spi_controller, dev);
3665 }
3666
3667 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3668 {
3669         struct device *dev;
3670
3671         dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3672
3673         return dev ? to_spi_device(dev) : NULL;
3674 }
3675
3676 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3677                            void *arg)
3678 {
3679         struct acpi_device *adev = arg;
3680         struct spi_controller *ctlr;
3681         struct spi_device *spi;
3682
3683         switch (value) {
3684         case ACPI_RECONFIG_DEVICE_ADD:
3685                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3686                 if (!ctlr)
3687                         break;
3688
3689                 acpi_register_spi_device(ctlr, adev);
3690                 put_device(&ctlr->dev);
3691                 break;
3692         case ACPI_RECONFIG_DEVICE_REMOVE:
3693                 if (!acpi_device_enumerated(adev))
3694                         break;
3695
3696                 spi = acpi_spi_find_device_by_adev(adev);
3697                 if (!spi)
3698                         break;
3699
3700                 spi_unregister_device(spi);
3701                 put_device(&spi->dev);
3702                 break;
3703         }
3704
3705         return NOTIFY_OK;
3706 }
3707
3708 static struct notifier_block spi_acpi_notifier = {
3709         .notifier_call = acpi_spi_notify,
3710 };
3711 #else
3712 extern struct notifier_block spi_acpi_notifier;
3713 #endif
3714
3715 static int __init spi_init(void)
3716 {
3717         int     status;
3718
3719         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3720         if (!buf) {
3721                 status = -ENOMEM;
3722                 goto err0;
3723         }
3724
3725         status = bus_register(&spi_bus_type);
3726         if (status < 0)
3727                 goto err1;
3728
3729         status = class_register(&spi_master_class);
3730         if (status < 0)
3731                 goto err2;
3732
3733         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3734                 status = class_register(&spi_slave_class);
3735                 if (status < 0)
3736                         goto err3;
3737         }
3738
3739         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3740                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3741         if (IS_ENABLED(CONFIG_ACPI))
3742                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3743
3744         return 0;
3745
3746 err3:
3747         class_unregister(&spi_master_class);
3748 err2:
3749         bus_unregister(&spi_bus_type);
3750 err1:
3751         kfree(buf);
3752         buf = NULL;
3753 err0:
3754         return status;
3755 }
3756
3757 /* board_info is normally registered in arch_initcall(),
3758  * but even essential drivers wait until later.
3759  *
3760  * REVISIT: only boardinfo really needs static linking.  The rest (device and
3761  * driver registration) _could_ be dynamically linked (modular) ... the costs
3762  * include needing to make boardinfo data structures much more public.
3763  */
3764 postcore_initcall(spi_init);
3765