/* drivers/spi/spi-tegra114.c (imported from linux.git) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPI driver for NVIDIA's Tegra114 SPI Controller.
4  *
5  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
6  */
7
8 #include <linux/clk.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/kernel.h>
18 #include <linux/kthread.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/reset.h>
25 #include <linux/spi/spi.h>
26
27 #define SPI_COMMAND1                            0x000
28 #define SPI_BIT_LENGTH(x)                       (((x) & 0x1f) << 0)
29 #define SPI_PACKED                              (1 << 5)
30 #define SPI_TX_EN                               (1 << 11)
31 #define SPI_RX_EN                               (1 << 12)
32 #define SPI_BOTH_EN_BYTE                        (1 << 13)
33 #define SPI_BOTH_EN_BIT                         (1 << 14)
34 #define SPI_LSBYTE_FE                           (1 << 15)
35 #define SPI_LSBIT_FE                            (1 << 16)
36 #define SPI_BIDIROE                             (1 << 17)
37 #define SPI_IDLE_SDA_DRIVE_LOW                  (0 << 18)
38 #define SPI_IDLE_SDA_DRIVE_HIGH                 (1 << 18)
39 #define SPI_IDLE_SDA_PULL_LOW                   (2 << 18)
40 #define SPI_IDLE_SDA_PULL_HIGH                  (3 << 18)
41 #define SPI_IDLE_SDA_MASK                       (3 << 18)
42 #define SPI_CS_SW_VAL                           (1 << 20)
43 #define SPI_CS_SW_HW                            (1 << 21)
44 /* SPI_CS_POL_INACTIVE bits are default high */
45                                                 /* n from 0 to 3 */
46 #define SPI_CS_POL_INACTIVE(n)                  (1 << (22 + (n)))
47 #define SPI_CS_POL_INACTIVE_MASK                (0xF << 22)
48
49 #define SPI_CS_SEL_0                            (0 << 26)
50 #define SPI_CS_SEL_1                            (1 << 26)
51 #define SPI_CS_SEL_2                            (2 << 26)
52 #define SPI_CS_SEL_3                            (3 << 26)
53 #define SPI_CS_SEL_MASK                         (3 << 26)
54 #define SPI_CS_SEL(x)                           (((x) & 0x3) << 26)
55 #define SPI_CONTROL_MODE_0                      (0 << 28)
56 #define SPI_CONTROL_MODE_1                      (1 << 28)
57 #define SPI_CONTROL_MODE_2                      (2 << 28)
58 #define SPI_CONTROL_MODE_3                      (3 << 28)
59 #define SPI_CONTROL_MODE_MASK                   (3 << 28)
60 #define SPI_MODE_SEL(x)                         (((x) & 0x3) << 28)
61 #define SPI_M_S                                 (1 << 30)
62 #define SPI_PIO                                 (1 << 31)
63
64 #define SPI_COMMAND2                            0x004
65 #define SPI_TX_TAP_DELAY(x)                     (((x) & 0x3F) << 6)
66 #define SPI_RX_TAP_DELAY(x)                     (((x) & 0x3F) << 0)
67
68 #define SPI_CS_TIMING1                          0x008
69 #define SPI_SETUP_HOLD(setup, hold)             (((setup) << 4) | (hold))
70 #define SPI_CS_SETUP_HOLD(reg, cs, val)                 \
71                 ((((val) & 0xFFu) << ((cs) * 8)) |      \
72                 ((reg) & ~(0xFFu << ((cs) * 8))))
73
74 #define SPI_CS_TIMING2                          0x00C
75 #define CYCLES_BETWEEN_PACKETS_0(x)             (((x) & 0x1F) << 0)
76 #define CS_ACTIVE_BETWEEN_PACKETS_0             (1 << 5)
77 #define CYCLES_BETWEEN_PACKETS_1(x)             (((x) & 0x1F) << 8)
78 #define CS_ACTIVE_BETWEEN_PACKETS_1             (1 << 13)
79 #define CYCLES_BETWEEN_PACKETS_2(x)             (((x) & 0x1F) << 16)
80 #define CS_ACTIVE_BETWEEN_PACKETS_2             (1 << 21)
81 #define CYCLES_BETWEEN_PACKETS_3(x)             (((x) & 0x1F) << 24)
82 #define CS_ACTIVE_BETWEEN_PACKETS_3             (1 << 29)
83 #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)         \
84                 (reg = (((val) & 0x1) << ((cs) * 8 + 5)) |      \
85                         ((reg) & ~(1 << ((cs) * 8 + 5))))
86 #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)            \
87                 (reg = (((val) & 0xF) << ((cs) * 8)) |          \
88                         ((reg) & ~(0xF << ((cs) * 8))))
89
90 #define SPI_TRANS_STATUS                        0x010
91 #define SPI_BLK_CNT(val)                        (((val) >> 0) & 0xFFFF)
92 #define SPI_SLV_IDLE_COUNT(val)                 (((val) >> 16) & 0xFF)
93 #define SPI_RDY                                 (1 << 30)
94
95 #define SPI_FIFO_STATUS                         0x014
96 #define SPI_RX_FIFO_EMPTY                       (1 << 0)
97 #define SPI_RX_FIFO_FULL                        (1 << 1)
98 #define SPI_TX_FIFO_EMPTY                       (1 << 2)
99 #define SPI_TX_FIFO_FULL                        (1 << 3)
100 #define SPI_RX_FIFO_UNF                         (1 << 4)
101 #define SPI_RX_FIFO_OVF                         (1 << 5)
102 #define SPI_TX_FIFO_UNF                         (1 << 6)
103 #define SPI_TX_FIFO_OVF                         (1 << 7)
104 #define SPI_ERR                                 (1 << 8)
105 #define SPI_TX_FIFO_FLUSH                       (1 << 14)
106 #define SPI_RX_FIFO_FLUSH                       (1 << 15)
107 #define SPI_TX_FIFO_EMPTY_COUNT(val)            (((val) >> 16) & 0x7F)
108 #define SPI_RX_FIFO_FULL_COUNT(val)             (((val) >> 23) & 0x7F)
109 #define SPI_FRAME_END                           (1 << 30)
110 #define SPI_CS_INACTIVE                         (1 << 31)
111
112 #define SPI_FIFO_ERROR                          (SPI_RX_FIFO_UNF | \
113                         SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
114 #define SPI_FIFO_EMPTY                  (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
115
116 #define SPI_TX_DATA                             0x018
117 #define SPI_RX_DATA                             0x01C
118
119 #define SPI_DMA_CTL                             0x020
120 #define SPI_TX_TRIG_1                           (0 << 15)
121 #define SPI_TX_TRIG_4                           (1 << 15)
122 #define SPI_TX_TRIG_8                           (2 << 15)
123 #define SPI_TX_TRIG_16                          (3 << 15)
124 #define SPI_TX_TRIG_MASK                        (3 << 15)
125 #define SPI_RX_TRIG_1                           (0 << 19)
126 #define SPI_RX_TRIG_4                           (1 << 19)
127 #define SPI_RX_TRIG_8                           (2 << 19)
128 #define SPI_RX_TRIG_16                          (3 << 19)
129 #define SPI_RX_TRIG_MASK                        (3 << 19)
130 #define SPI_IE_TX                               (1 << 28)
131 #define SPI_IE_RX                               (1 << 29)
132 #define SPI_CONT                                (1 << 30)
133 #define SPI_DMA                                 (1 << 31)
134 #define SPI_DMA_EN                              SPI_DMA
135
136 #define SPI_DMA_BLK                             0x024
137 #define SPI_DMA_BLK_SET(x)                      (((x) & 0xFFFF) << 0)
138
139 #define SPI_TX_FIFO                             0x108
140 #define SPI_RX_FIFO                             0x188
141 #define SPI_INTR_MASK                           0x18c
142 #define SPI_INTR_ALL_MASK                       (0x1fUL << 25)
143 #define MAX_CHIP_SELECT                         4
144 #define SPI_FIFO_DEPTH                          64
145 #define DATA_DIR_TX                             (1 << 0)
146 #define DATA_DIR_RX                             (1 << 1)
147
148 #define SPI_DMA_TIMEOUT                         (msecs_to_jiffies(1000))
149 #define DEFAULT_SPI_DMA_BUF_LEN                 (16*1024)
150 #define TX_FIFO_EMPTY_COUNT_MAX                 SPI_TX_FIFO_EMPTY_COUNT(0x40)
151 #define RX_FIFO_FULL_COUNT_ZERO                 SPI_RX_FIFO_FULL_COUNT(0)
152 #define MAX_HOLD_CYCLES                         16
153 #define SPI_DEFAULT_SPEED                       25000000
154
/* Per-SoC feature flags selected via OF match data. */
struct tegra_spi_soc_data {
	bool has_intr_mask_reg;	/* SoC exposes the SPI_INTR_MASK register */
};

/* Driver state for one Tegra114 SPI controller instance. */
struct tegra_spi_data {
	struct device				*dev;
	struct spi_master			*master;
	spinlock_t				lock;	/* NOTE(review): scope not visible in this chunk — presumably guards ISR vs. transfer thread; confirm at lock sites */

	struct clk				*clk;
	struct reset_control			*rst;
	void __iomem				*base;	/* mapped controller registers */
	phys_addr_t				phys;	/* bus address of registers, used for DMA FIFO addresses */
	unsigned				irq;
	u32					cur_speed;	/* last clock rate programmed, to skip redundant clk_set_rate() */

	struct spi_device			*cur_spi;
	struct spi_device			*cs_control;
	unsigned				cur_pos;	/* bytes of current transfer completed */
	unsigned				words_per_32bit;	/* transfer words packed per FIFO word (1 if unpacked) */
	unsigned				bytes_per_word;	/* DIV_ROUND_UP(bits_per_word, 8) */
	unsigned				curr_dma_words;	/* words in the chunk currently in flight */
	unsigned				cur_direction;	/* DATA_DIR_TX / DATA_DIR_RX bitmask */

	/* Per-direction byte progress within the current transfer */
	unsigned				cur_rx_pos;
	unsigned				cur_tx_pos;

	unsigned				dma_buf_size;	/* size of each coherent bounce buffer */
	unsigned				max_buf_size;	/* largest chunk movable in one go */
	bool					is_curr_dma_xfer;	/* current chunk uses DMA rather than PIO */

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	/* Status captured for/by the interrupt path */
	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;	/* packed mode chosen for current transfer */

	/* Register shadows so the ISR/next transfer need not read back hw */
	u32					command1_reg;
	u32					dma_control_reg;
	u32					def_command1_reg;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;
	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	const struct tegra_spi_soc_data		*soc_data;
};
211
212 static int tegra_spi_runtime_suspend(struct device *dev);
213 static int tegra_spi_runtime_resume(struct device *dev);
214
/* Read a 32-bit controller register at byte offset @reg. */
static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
		unsigned long reg)
{
	return readl(tspi->base + reg);
}
220
/*
 * Write a 32-bit controller register at byte offset @reg.
 *
 * The write is followed by a dummy read of SPI_COMMAND1 so the posted
 * write has reached the hardware before we continue.  TX FIFO writes
 * are exempted from the read-back (presumably to keep the PIO fill
 * loop cheap — the original comment only states the completion intent).
 */
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
		u32 val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back register to make sure that register writes completed */
	if (reg != SPI_TX_FIFO)
		readl(tspi->base + SPI_COMMAND1);
}
230
/*
 * Clear latched transfer status and any FIFO error flags.
 * Both SPI_TRANS_STATUS and the FIFO error bits are write-1-to-clear.
 */
static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
	u32 val;

	/* Write 1 to clear status register */
	val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
	tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);

	/* Clear fifo status error if any */
	val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if (val & SPI_ERR)
		tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
				SPI_FIFO_STATUS);
}
245
246 static unsigned tegra_spi_calculate_curr_xfer_param(
247         struct spi_device *spi, struct tegra_spi_data *tspi,
248         struct spi_transfer *t)
249 {
250         unsigned remain_len = t->len - tspi->cur_pos;
251         unsigned max_word;
252         unsigned bits_per_word = t->bits_per_word;
253         unsigned max_len;
254         unsigned total_fifo_words;
255
256         tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
257
258         if ((bits_per_word == 8 || bits_per_word == 16 ||
259              bits_per_word == 32) && t->len > 3) {
260                 tspi->is_packed = 1;
261                 tspi->words_per_32bit = 32/bits_per_word;
262         } else {
263                 tspi->is_packed = 0;
264                 tspi->words_per_32bit = 1;
265         }
266
267         if (tspi->is_packed) {
268                 max_len = min(remain_len, tspi->max_buf_size);
269                 tspi->curr_dma_words = max_len/tspi->bytes_per_word;
270                 total_fifo_words = (max_len + 3) / 4;
271         } else {
272                 max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
273                 max_word = min(max_word, tspi->max_buf_size/4);
274                 tspi->curr_dma_words = max_word;
275                 total_fifo_words = max_word;
276         }
277         return total_fifo_words;
278 }
279
/*
 * PIO-fill the TX FIFO from the client buffer of transfer @t.
 *
 * Packed mode packs as many transfer words as fit into each 32-bit
 * FIFO word (little-endian byte order); unpacked mode puts exactly one
 * transfer word in the low bytes of each FIFO word.  Advances
 * tspi->cur_tx_pos and returns the number of transfer words written.
 */
static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
	struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	u32 fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		/* Limited by both FIFO space and the words left in this chunk */
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			/* Pack up to 4 bytes, LSB first, into one FIFO word */
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		max_n_32bit = min(tspi->curr_dma_words,  tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		/* Clamp so we never read past the end of the client buffer */
		if (nbytes > t->len - tspi->cur_pos)
			nbytes = t->len - tspi->cur_pos;
		write_bytes = nbytes;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}
331
/*
 * PIO-drain the RX FIFO into the client buffer of transfer @t.
 *
 * Mirrors the TX fill routine: packed mode unpacks several transfer
 * words per 32-bit FIFO word, unpacked mode takes one (masked to
 * bits_per_word) per FIFO word.  Advances tspi->cur_rx_pos and returns
 * the number of transfer words read.
 */
static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	u32 fifo_status;
	unsigned i, count;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);

			/* Unpack LSB-first; stop when the chunk is consumed */
			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		read_words += tspi->curr_dma_words;
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		/* Discard bits above bits_per_word */
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		/* Clamp so we never write past the end of the client buffer */
		if (len > t->len - tspi->cur_pos)
			len = t->len - tspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		read_words += rx_full_count;
		tspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}
375
/*
 * Stage the next chunk of transfer @t into the TX DMA bounce buffer,
 * bracketed by the cpu/device cache maintenance calls.
 *
 * NOTE(review): the packed branch indexes the client buffer with
 * tspi->cur_pos while the unpacked branch (and the position update)
 * uses tspi->cur_tx_pos — verify these cannot diverge across chunks.
 */
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the dma buffer to read by cpu */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		/* Packed layout matches memory layout: plain copy */
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
		tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int write_bytes;

		/* Clamp to the bytes actually left in the client buffer */
		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = 0;

			/* One transfer word, LSB first, per 32-bit buffer slot */
			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tspi->tx_dma_buf[count] = x;
		}

		tspi->cur_tx_pos += write_bytes;
	}

	/* Make the dma buffer to read by dma */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}
414
/*
 * Copy the chunk the RX DMA just delivered from the bounce buffer into
 * the client buffer of transfer @t, bracketed by the cpu/device cache
 * maintenance calls.  Mirror of the TX staging routine.
 */
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the dma buffer to read by cpu */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		/* Packed layout matches memory layout: plain copy */
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		/* Discard bits above bits_per_word */
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int read_bytes;

		/* Clamp to the bytes actually left in the client buffer */
		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = tspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}

		tspi->cur_rx_pos += read_bytes;
	}

	/* Make the dma buffer to read by dma */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
		tspi->dma_buf_size, DMA_FROM_DEVICE);
}
453
/* dmaengine callback: wake whoever waits on the direction's completion. */
static void tegra_spi_dma_complete(void *args)
{
	complete(args);
}
460
/*
 * Prepare and submit a mem-to-device descriptor of @len bytes from the
 * TX bounce buffer.  Completion is signalled via tspi->tx_dma_complete.
 * Returns 0 on success, -EIO if no descriptor could be prepared.
 */
static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->tx_dma_complete);
	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
	if (!tspi->tx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;

	dmaengine_submit(tspi->tx_dma_desc);
	dma_async_issue_pending(tspi->tx_dma_chan);
	return 0;
}
479
/*
 * Prepare and submit a device-to-mem descriptor of @len bytes into the
 * RX bounce buffer.  Completion is signalled via tspi->rx_dma_complete.
 * Returns 0 on success, -EIO if no descriptor could be prepared.
 */
static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->rx_dma_complete);
	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
	if (!tspi->rx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;

	dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma_chan);
	return 0;
}
498
/*
 * If either FIFO is non-empty, request a flush of both and busy-wait
 * (up to one second) until both report empty.
 * Returns 0 on success, -EIO on timeout.
 */
static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
{
	unsigned long timeout = jiffies + HZ;
	u32 status;

	status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
		status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
		tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
		/* Poll until both empty bits are set or the deadline passes */
		while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
			status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
			if (time_after(jiffies, timeout)) {
				dev_err(tspi->dev,
					"timeout waiting for fifo flush\n");
				return -EIO;
			}

			udelay(1);
		}
	}

	return 0;
}
522
523 static int tegra_spi_start_dma_based_transfer(
524                 struct tegra_spi_data *tspi, struct spi_transfer *t)
525 {
526         u32 val;
527         unsigned int len;
528         int ret = 0;
529         u8 dma_burst;
530         struct dma_slave_config dma_sconfig = {0};
531
532         val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
533         tegra_spi_writel(tspi, val, SPI_DMA_BLK);
534
535         if (tspi->is_packed)
536                 len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
537                                         4) * 4;
538         else
539                 len = tspi->curr_dma_words * 4;
540
541         /* Set attention level based on length of transfer */
542         if (len & 0xF) {
543                 val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
544                 dma_burst = 1;
545         } else if (((len) >> 4) & 0x1) {
546                 val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
547                 dma_burst = 4;
548         } else {
549                 val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
550                 dma_burst = 8;
551         }
552
553         if (!tspi->soc_data->has_intr_mask_reg) {
554                 if (tspi->cur_direction & DATA_DIR_TX)
555                         val |= SPI_IE_TX;
556
557                 if (tspi->cur_direction & DATA_DIR_RX)
558                         val |= SPI_IE_RX;
559         }
560
561         tegra_spi_writel(tspi, val, SPI_DMA_CTL);
562         tspi->dma_control_reg = val;
563
564         dma_sconfig.device_fc = true;
565         if (tspi->cur_direction & DATA_DIR_TX) {
566                 dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
567                 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
568                 dma_sconfig.dst_maxburst = dma_burst;
569                 ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
570                 if (ret < 0) {
571                         dev_err(tspi->dev,
572                                 "DMA slave config failed: %d\n", ret);
573                         return ret;
574                 }
575
576                 tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
577                 ret = tegra_spi_start_tx_dma(tspi, len);
578                 if (ret < 0) {
579                         dev_err(tspi->dev,
580                                 "Starting tx dma failed, err %d\n", ret);
581                         return ret;
582                 }
583         }
584
585         if (tspi->cur_direction & DATA_DIR_RX) {
586                 dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
587                 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
588                 dma_sconfig.src_maxburst = dma_burst;
589                 ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
590                 if (ret < 0) {
591                         dev_err(tspi->dev,
592                                 "DMA slave config failed: %d\n", ret);
593                         return ret;
594                 }
595
596                 /* Make the dma buffer to read by dma */
597                 dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
598                                 tspi->dma_buf_size, DMA_FROM_DEVICE);
599
600                 ret = tegra_spi_start_rx_dma(tspi, len);
601                 if (ret < 0) {
602                         dev_err(tspi->dev,
603                                 "Starting rx dma failed, err %d\n", ret);
604                         if (tspi->cur_direction & DATA_DIR_TX)
605                                 dmaengine_terminate_all(tspi->tx_dma_chan);
606                         return ret;
607                 }
608         }
609         tspi->is_curr_dma_xfer = true;
610         tspi->dma_control_reg = val;
611
612         val |= SPI_DMA_EN;
613         tegra_spi_writel(tspi, val, SPI_DMA_CTL);
614         return ret;
615 }
616
/*
 * Start an interrupt-driven PIO transfer for the current chunk: prime
 * the TX FIFO (if transmitting), program the block count and interrupt
 * enables, then set SPI_PIO in COMMAND1 to start the hardware.
 */
static int tegra_spi_start_cpu_based_transfer(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned cur_words;

	if (tspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		cur_words = tspi->curr_dma_words;

	/* Hardware counts blocks as N-1 */
	val = SPI_DMA_BLK_SET(cur_words - 1);
	tegra_spi_writel(tspi, val, SPI_DMA_BLK);

	val = 0;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SPI_IE_TX;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SPI_IE_RX;

	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;

	val = tspi->command1_reg;
	val |= SPI_PIO;
	tegra_spi_writel(tspi, val, SPI_COMMAND1);
	return 0;
}
648
/*
 * Request the "rx" or "tx" dmaengine channel and allocate its coherent
 * bounce buffer, storing channel/buffer/phys in @tspi.
 *
 * Returns 0 on success; -EPROBE_DEFER is passed through silently when
 * the channel provider is not ready yet, other errors are logged.
 */
static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	u32 *dma_buf;
	dma_addr_t dma_phys;
	int ret;

	dma_chan = dma_request_slave_channel_reason(tspi->dev,
					dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(tspi->dev,
				"Dma channel is not available: %d\n", ret);
		return ret;
	}

	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
				&dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
		dma_release_channel(dma_chan);
		return -ENOMEM;
	}

	if (dma_to_memory) {
		tspi->rx_dma_chan = dma_chan;
		tspi->rx_dma_buf = dma_buf;
		tspi->rx_dma_phys = dma_phys;
	} else {
		tspi->tx_dma_chan = dma_chan;
		tspi->tx_dma_buf = dma_buf;
		tspi->tx_dma_phys = dma_phys;
	}
	return 0;
}
686
687 static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
688         bool dma_to_memory)
689 {
690         u32 *dma_buf;
691         dma_addr_t dma_phys;
692         struct dma_chan *dma_chan;
693
694         if (dma_to_memory) {
695                 dma_buf = tspi->rx_dma_buf;
696                 dma_chan = tspi->rx_dma_chan;
697                 dma_phys = tspi->rx_dma_phys;
698                 tspi->rx_dma_chan = NULL;
699                 tspi->rx_dma_buf = NULL;
700         } else {
701                 dma_buf = tspi->tx_dma_buf;
702                 dma_chan = tspi->tx_dma_chan;
703                 dma_phys = tspi->tx_dma_phys;
704                 tspi->tx_dma_buf = NULL;
705                 tspi->tx_dma_chan = NULL;
706         }
707         if (!dma_chan)
708                 return;
709
710         dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
711         dma_release_channel(dma_chan);
712 }
713
/*
 * tegra_spi_setup_transfer_one - prepare controller state for one transfer
 * @spi: client device of the current message
 * @t: transfer whose speed and word size should take effect
 * @is_first_of_msg: true for the first transfer of a message
 *
 * Updates the module clock if the requested speed changed, resets the
 * per-transfer position bookkeeping, and builds the SPI_COMMAND1 value
 * for this transfer. For the first transfer of a message the full mode,
 * LSB-first, 3-wire and software chip-select configuration is derived
 * from spi->mode; for subsequent transfers only the bit length is
 * refreshed from the cached register value.
 *
 * Returns the SPI_COMMAND1 value the caller should program via
 * tegra_spi_start_transfer_one(); only the CS-setup portion is written
 * to hardware here.
 */
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
		struct spi_transfer *t, bool is_first_of_msg)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 command1;
	int req_mode;

	/* NOTE(review): clk_set_rate() result is ignored — presumably a
	 * deliberate best-effort; confirm before adding error handling. */
	if (speed != tspi->cur_speed) {
		clk_set_rate(tspi->clk, speed);
		tspi->cur_speed = speed;
	}

	/* Reset per-transfer bookkeeping used by the IRQ handlers */
	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_spi_clear_status(tspi);

		command1 = tspi->def_command1_reg;
		/* Hardware encodes word length as (bits - 1) */
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~SPI_CONTROL_MODE_MASK;
		/* Low two mode bits select CPOL/CPHA (SPI modes 0..3) */
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_0)
			command1 |= SPI_CONTROL_MODE_0;
		else if (req_mode == SPI_MODE_1)
			command1 |= SPI_CONTROL_MODE_1;
		else if (req_mode == SPI_MODE_2)
			command1 |= SPI_CONTROL_MODE_2;
		else if (req_mode == SPI_MODE_3)
			command1 |= SPI_CONTROL_MODE_3;

		if (spi->mode & SPI_LSB_FIRST)
			command1 |= SPI_LSBIT_FE;
		else
			command1 &= ~SPI_LSBIT_FE;

		/* 3-wire mode: enable bidirectional output on the data line */
		if (spi->mode & SPI_3WIRE)
			command1 |= SPI_BIDIROE;
		else
			command1 &= ~SPI_BIDIROE;

		/*
		 * If the previous message left CS asserted for this same
		 * device (cs_change on its last transfer), skip the write
		 * that would glitch the still-active chip select.
		 */
		if (tspi->cs_control) {
			if (tspi->cs_control != spi)
				tegra_spi_writel(tspi, command1, SPI_COMMAND1);
			tspi->cs_control = NULL;
		} else
			tegra_spi_writel(tspi, command1, SPI_COMMAND1);

		/* Drive CS by software; polarity follows SPI_CS_HIGH */
		command1 |= SPI_CS_SW_HW;
		if (spi->mode & SPI_CS_HIGH)
			command1 |= SPI_CS_SW_VAL;
		else
			command1 &= ~SPI_CS_SW_VAL;

		tegra_spi_writel(tspi, 0, SPI_COMMAND2);
	} else {
		/* Later transfers: only the word length may change */
		command1 = tspi->command1_reg;
		command1 &= ~SPI_BIT_LENGTH(~0);
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
	}

	return command1;
}
783
784 static int tegra_spi_start_transfer_one(struct spi_device *spi,
785                 struct spi_transfer *t, u32 command1)
786 {
787         struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
788         unsigned total_fifo_words;
789         int ret;
790
791         total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
792
793         if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
794                 command1 |= SPI_BOTH_EN_BIT;
795         else
796                 command1 &= ~SPI_BOTH_EN_BIT;
797
798         if (tspi->is_packed)
799                 command1 |= SPI_PACKED;
800         else
801                 command1 &= ~SPI_PACKED;
802
803         command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
804         tspi->cur_direction = 0;
805         if (t->rx_buf) {
806                 command1 |= SPI_RX_EN;
807                 tspi->cur_direction |= DATA_DIR_RX;
808         }
809         if (t->tx_buf) {
810                 command1 |= SPI_TX_EN;
811                 tspi->cur_direction |= DATA_DIR_TX;
812         }
813         command1 |= SPI_CS_SEL(spi->chip_select);
814         tegra_spi_writel(tspi, command1, SPI_COMMAND1);
815         tspi->command1_reg = command1;
816
817         dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
818                 tspi->def_command1_reg, (unsigned)command1);
819
820         ret = tegra_spi_flush_fifos(tspi);
821         if (ret < 0)
822                 return ret;
823         if (total_fifo_words > SPI_FIFO_DEPTH)
824                 ret = tegra_spi_start_dma_based_transfer(tspi, t);
825         else
826                 ret = tegra_spi_start_cpu_based_transfer(tspi, t);
827         return ret;
828 }
829
830 static int tegra_spi_setup(struct spi_device *spi)
831 {
832         struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
833         u32 val;
834         unsigned long flags;
835         int ret;
836
837         dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
838                 spi->bits_per_word,
839                 spi->mode & SPI_CPOL ? "" : "~",
840                 spi->mode & SPI_CPHA ? "" : "~",
841                 spi->max_speed_hz);
842
843         ret = pm_runtime_get_sync(tspi->dev);
844         if (ret < 0) {
845                 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
846                 return ret;
847         }
848
849         if (tspi->soc_data->has_intr_mask_reg) {
850                 val = tegra_spi_readl(tspi, SPI_INTR_MASK);
851                 val &= ~SPI_INTR_ALL_MASK;
852                 tegra_spi_writel(tspi, val, SPI_INTR_MASK);
853         }
854
855         spin_lock_irqsave(&tspi->lock, flags);
856         val = tspi->def_command1_reg;
857         if (spi->mode & SPI_CS_HIGH)
858                 val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
859         else
860                 val |= SPI_CS_POL_INACTIVE(spi->chip_select);
861         tspi->def_command1_reg = val;
862         tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
863         spin_unlock_irqrestore(&tspi->lock, flags);
864
865         pm_runtime_put(tspi->dev);
866         return 0;
867 }
868
/*
 * tegra_spi_transfer_delay - busy-wait for @delay microseconds
 *
 * Used for the optional inter-transfer delay after deasserting chip
 * select. Splits the wait into a millisecond part and the remaining
 * sub-millisecond part; a zero delay is a no-op.
 */
static void tegra_spi_transfer_delay(int delay)
{
	if (delay == 0)
		return;

	if (delay >= 1000)
		mdelay(delay / 1000);

	udelay(delay % 1000);
}
879
880 static void tegra_spi_transfer_end(struct spi_device *spi)
881 {
882         struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
883         int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
884
885         if (cs_val)
886                 tspi->command1_reg |= SPI_CS_SW_VAL;
887         else
888                 tspi->command1_reg &= ~SPI_CS_SW_VAL;
889         tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
890         tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
891 }
892
893 static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
894 {
895         dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
896         dev_dbg(tspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
897                 tegra_spi_readl(tspi, SPI_COMMAND1),
898                 tegra_spi_readl(tspi, SPI_COMMAND2));
899         dev_dbg(tspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
900                 tegra_spi_readl(tspi, SPI_DMA_CTL),
901                 tegra_spi_readl(tspi, SPI_DMA_BLK));
902         dev_dbg(tspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
903                 tegra_spi_readl(tspi, SPI_TRANS_STATUS),
904                 tegra_spi_readl(tspi, SPI_FIFO_STATUS));
905 }
906
/*
 * tegra_spi_transfer_one_message - execute a complete SPI message
 * @master: controller the message was queued on
 * @msg: message whose transfers are executed in list order
 *
 * Starts each transfer (PIO or DMA, decided per transfer), waits for its
 * completion from the IRQ thread, and manages chip select and optional
 * delays between transfers. On timeout the controller is reset to
 * recover the hardware. Always finalizes the message; returns 0 on
 * success or a negative error code (also stored in msg->status).
 */
static int tegra_spi_transfer_one_message(struct spi_master *master,
			struct spi_message *msg)
{
	bool is_first_msg = true;
	struct tegra_spi_data *tspi = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret;
	bool skip = false;

	msg->status = 0;
	msg->actual_length = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		u32 cmd1;

		reinit_completion(&tspi->xfer_completion);

		cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);

		/* Zero-length transfer: only the CS/delay handling below runs */
		if (!xfer->len) {
			ret = 0;
			skip = true;
			goto complete_xfer;
		}

		ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tspi->dev,
				"spi can not start transfer, err %d\n", ret);
			goto complete_xfer;
		}

		is_first_msg = false;
		ret = wait_for_completion_timeout(&tspi->xfer_completion,
						SPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tspi->dev,
				"spi transfer timeout, err %d\n", ret);
			/* Stop any in-flight DMA before resetting the block */
			if (tspi->is_curr_dma_xfer &&
			    (tspi->cur_direction & DATA_DIR_TX))
				dmaengine_terminate_all(tspi->tx_dma_chan);
			if (tspi->is_curr_dma_xfer &&
			    (tspi->cur_direction & DATA_DIR_RX))
				dmaengine_terminate_all(tspi->rx_dma_chan);
			ret = -EIO;
			tegra_spi_dump_regs(tspi);
			tegra_spi_flush_fifos(tspi);
			/* Full controller reset to recover from the hang */
			reset_control_assert(tspi->rst);
			udelay(2);
			reset_control_deassert(tspi->rst);
			goto complete_xfer;
		}

		if (tspi->tx_status ||  tspi->rx_status) {
			dev_err(tspi->dev, "Error in Transfer\n");
			ret = -EIO;
			tegra_spi_dump_regs(tspi);
			goto complete_xfer;
		}
		msg->actual_length += xfer->len;

complete_xfer:
		if (ret < 0 || skip) {
			/* Error or empty transfer: deassert CS and bail out */
			tegra_spi_transfer_end(spi);
			tegra_spi_transfer_delay(xfer->delay_usecs);
			goto exit;
		} else if (list_is_last(&xfer->transfer_list,
					&msg->transfers)) {
			/*
			 * Last transfer: cs_change requests CS be kept
			 * asserted into the next message (tspi->cs_control
			 * makes tegra_spi_setup_transfer_one skip the write).
			 */
			if (xfer->cs_change)
				tspi->cs_control = spi;
			else {
				tegra_spi_transfer_end(spi);
				tegra_spi_transfer_delay(xfer->delay_usecs);
			}
		} else if (xfer->cs_change) {
			/* Mid-message CS toggle requested by the client */
			tegra_spi_transfer_end(spi);
			tegra_spi_transfer_delay(xfer->delay_usecs);
		}

	}
	ret = 0;
exit:
	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}
994
/*
 * handle_cpu_based_xfer - threaded-IRQ bottom half for PIO transfers
 * @tspi: driver state; curr_xfer is the transfer being serviced
 *
 * On FIFO over/underflow errors, flushes the FIFOs, completes the
 * transfer (the error is left in tx_status/rx_status for the message
 * loop) and resets the controller. Otherwise drains the RX FIFO and
 * either completes the transfer when all bytes have moved or starts
 * the next FIFO-sized chunk.
 */
static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tspi->lock, flags);
	if (tspi->tx_status ||  tspi->rx_status) {
		dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
			tspi->status_reg);
		dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
			tspi->command1_reg, tspi->dma_control_reg);
		tegra_spi_dump_regs(tspi);
		tegra_spi_flush_fifos(tspi);
		complete(&tspi->xfer_completion);
		/* Reset outside the lock; the transfer has already failed */
		spin_unlock_irqrestore(&tspi->lock, flags);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);

	/* TX progress is authoritative when both directions are active */
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	/* More data remains than fits in the FIFO: start the next chunk */
	tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
	tegra_spi_start_cpu_based_transfer(tspi, t);
exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}
1035
/*
 * handle_dma_based_xfer - threaded-IRQ bottom half for DMA transfers
 * @tspi: driver state; curr_xfer is the transfer being serviced
 *
 * Waits for the TX/RX DMA completions (or terminates them on FIFO
 * error/timeout), accumulating an error code in @err (bit 0 = TX
 * failure, bit 1 = RX failure). On error the controller is reset and
 * the transfer completed. Otherwise the received data is copied back
 * to the client buffer and either the transfer is completed or the
 * next chunk is started (DMA or PIO, depending on remaining size).
 */
static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
{
	struct spi_transfer *t = tspi->curr_xfer;
	long wait_status;
	int err = 0;
	unsigned total_fifo_words;
	unsigned long flags;

	/* Abort dmas if any error */
	if (tspi->cur_direction & DATA_DIR_TX) {
		if (tspi->tx_status) {
			dmaengine_terminate_all(tspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->tx_dma_chan);
				dev_err(tspi->dev, "TxDma Xfer failed\n");
				err += 1;
			}
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		if (tspi->rx_status) {
			dmaengine_terminate_all(tspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tspi->rx_dma_chan);
				dev_err(tspi->dev, "RxDma Xfer failed\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tspi->lock, flags);
	if (err) {
		dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
			tspi->status_reg);
		dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
			tspi->command1_reg, tspi->dma_control_reg);
		tegra_spi_dump_regs(tspi);
		tegra_spi_flush_fifos(tspi);
		complete(&tspi->xfer_completion);
		/* Reset outside the lock; the transfer has already failed */
		spin_unlock_irqrestore(&tspi->lock, flags);
		reset_control_assert(tspi->rst);
		udelay(2);
		reset_control_deassert(tspi->rst);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

	/* TX progress is authoritative when both directions are active */
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else
		tspi->cur_pos = tspi->cur_rx_pos;

	if (tspi->cur_pos == t->len) {
		complete(&tspi->xfer_completion);
		goto exit;
	}

	/* Continue transfer in current message */
	total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
							tspi, t);
	if (total_fifo_words > SPI_FIFO_DEPTH)
		err = tegra_spi_start_dma_based_transfer(tspi, t);
	else
		err = tegra_spi_start_cpu_based_transfer(tspi, t);

exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return IRQ_HANDLED;
}
1116
1117 static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
1118 {
1119         struct tegra_spi_data *tspi = context_data;
1120
1121         if (!tspi->is_curr_dma_xfer)
1122                 return handle_cpu_based_xfer(tspi);
1123         return handle_dma_based_xfer(tspi);
1124 }
1125
1126 static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1127 {
1128         struct tegra_spi_data *tspi = context_data;
1129
1130         tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
1131         if (tspi->cur_direction & DATA_DIR_TX)
1132                 tspi->tx_status = tspi->status_reg &
1133                                         (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
1134
1135         if (tspi->cur_direction & DATA_DIR_RX)
1136                 tspi->rx_status = tspi->status_reg &
1137                                         (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
1138         tegra_spi_clear_status(tspi);
1139
1140         return IRQ_WAKE_THREAD;
1141 }
1142
/*
 * Per-SoC feature flags. Only Tegra210 sets has_intr_mask_reg, which
 * gates the SPI_INTR_MASK handling in tegra_spi_setup().
 */
static struct tegra_spi_soc_data tegra114_spi_soc_data = {
	.has_intr_mask_reg = false,
};

static struct tegra_spi_soc_data tegra124_spi_soc_data = {
	.has_intr_mask_reg = false,
};

static struct tegra_spi_soc_data tegra210_spi_soc_data = {
	.has_intr_mask_reg = true,
};
1154
/* Device-tree match table; .data selects the per-SoC feature flags. */
static const struct of_device_id tegra_spi_of_match[] = {
	{
		.compatible = "nvidia,tegra114-spi",
		.data       = &tegra114_spi_soc_data,
	}, {
		.compatible = "nvidia,tegra124-spi",
		.data       = &tegra124_spi_soc_data,
	}, {
		.compatible = "nvidia,tegra210-spi",
		.data       = &tegra210_spi_soc_data,
	},
	{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
1169
1170 static int tegra_spi_probe(struct platform_device *pdev)
1171 {
1172         struct spi_master       *master;
1173         struct tegra_spi_data   *tspi;
1174         struct resource         *r;
1175         int ret, spi_irq;
1176         int bus_num;
1177
1178         master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
1179         if (!master) {
1180                 dev_err(&pdev->dev, "master allocation failed\n");
1181                 return -ENOMEM;
1182         }
1183         platform_set_drvdata(pdev, master);
1184         tspi = spi_master_get_devdata(master);
1185
1186         if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
1187                                  &master->max_speed_hz))
1188                 master->max_speed_hz = 25000000; /* 25MHz */
1189
1190         /* the spi->mode bits understood by this driver: */
1191         master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
1192                             SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
1193         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1194         master->setup = tegra_spi_setup;
1195         master->transfer_one_message = tegra_spi_transfer_one_message;
1196         master->num_chipselect = MAX_CHIP_SELECT;
1197         master->auto_runtime_pm = true;
1198         bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1199         if (bus_num >= 0)
1200                 master->bus_num = bus_num;
1201
1202         tspi->master = master;
1203         tspi->dev = &pdev->dev;
1204         spin_lock_init(&tspi->lock);
1205
1206         tspi->soc_data = of_device_get_match_data(&pdev->dev);
1207         if (!tspi->soc_data) {
1208                 dev_err(&pdev->dev, "unsupported tegra\n");
1209                 ret = -ENODEV;
1210                 goto exit_free_master;
1211         }
1212
1213         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1214         tspi->base = devm_ioremap_resource(&pdev->dev, r);
1215         if (IS_ERR(tspi->base)) {
1216                 ret = PTR_ERR(tspi->base);
1217                 goto exit_free_master;
1218         }
1219         tspi->phys = r->start;
1220
1221         spi_irq = platform_get_irq(pdev, 0);
1222         tspi->irq = spi_irq;
1223
1224         tspi->clk = devm_clk_get(&pdev->dev, "spi");
1225         if (IS_ERR(tspi->clk)) {
1226                 dev_err(&pdev->dev, "can not get clock\n");
1227                 ret = PTR_ERR(tspi->clk);
1228                 goto exit_free_master;
1229         }
1230
1231         tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
1232         if (IS_ERR(tspi->rst)) {
1233                 dev_err(&pdev->dev, "can not get reset\n");
1234                 ret = PTR_ERR(tspi->rst);
1235                 goto exit_free_master;
1236         }
1237
1238         tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
1239         tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1240
1241         ret = tegra_spi_init_dma_param(tspi, true);
1242         if (ret < 0)
1243                 goto exit_free_master;
1244         ret = tegra_spi_init_dma_param(tspi, false);
1245         if (ret < 0)
1246                 goto exit_rx_dma_free;
1247         tspi->max_buf_size = tspi->dma_buf_size;
1248         init_completion(&tspi->tx_dma_complete);
1249         init_completion(&tspi->rx_dma_complete);
1250
1251         init_completion(&tspi->xfer_completion);
1252
1253         pm_runtime_enable(&pdev->dev);
1254         if (!pm_runtime_enabled(&pdev->dev)) {
1255                 ret = tegra_spi_runtime_resume(&pdev->dev);
1256                 if (ret)
1257                         goto exit_pm_disable;
1258         }
1259
1260         ret = pm_runtime_get_sync(&pdev->dev);
1261         if (ret < 0) {
1262                 dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
1263                 goto exit_pm_disable;
1264         }
1265
1266         reset_control_assert(tspi->rst);
1267         udelay(2);
1268         reset_control_deassert(tspi->rst);
1269         tspi->def_command1_reg  = SPI_M_S;
1270         tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1271         pm_runtime_put(&pdev->dev);
1272         ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
1273                                    tegra_spi_isr_thread, IRQF_ONESHOT,
1274                                    dev_name(&pdev->dev), tspi);
1275         if (ret < 0) {
1276                 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1277                         tspi->irq);
1278                 goto exit_pm_disable;
1279         }
1280
1281         master->dev.of_node = pdev->dev.of_node;
1282         ret = devm_spi_register_master(&pdev->dev, master);
1283         if (ret < 0) {
1284                 dev_err(&pdev->dev, "can not register to master err %d\n", ret);
1285                 goto exit_free_irq;
1286         }
1287         return ret;
1288
1289 exit_free_irq:
1290         free_irq(spi_irq, tspi);
1291 exit_pm_disable:
1292         pm_runtime_disable(&pdev->dev);
1293         if (!pm_runtime_status_suspended(&pdev->dev))
1294                 tegra_spi_runtime_suspend(&pdev->dev);
1295         tegra_spi_deinit_dma_param(tspi, false);
1296 exit_rx_dma_free:
1297         tegra_spi_deinit_dma_param(tspi, true);
1298 exit_free_master:
1299         spi_master_put(master);
1300         return ret;
1301 }
1302
1303 static int tegra_spi_remove(struct platform_device *pdev)
1304 {
1305         struct spi_master *master = platform_get_drvdata(pdev);
1306         struct tegra_spi_data   *tspi = spi_master_get_devdata(master);
1307
1308         free_irq(tspi->irq, tspi);
1309
1310         if (tspi->tx_dma_chan)
1311                 tegra_spi_deinit_dma_param(tspi, false);
1312
1313         if (tspi->rx_dma_chan)
1314                 tegra_spi_deinit_dma_param(tspi, true);
1315
1316         pm_runtime_disable(&pdev->dev);
1317         if (!pm_runtime_status_suspended(&pdev->dev))
1318                 tegra_spi_runtime_suspend(&pdev->dev);
1319
1320         return 0;
1321 }
1322
1323 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: just quiesce the SPI core's message queue. */
static int tegra_spi_suspend(struct device *dev)
{
	return spi_master_suspend(dev_get_drvdata(dev));
}
1330
1331 static int tegra_spi_resume(struct device *dev)
1332 {
1333         struct spi_master *master = dev_get_drvdata(dev);
1334         struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1335         int ret;
1336
1337         ret = pm_runtime_get_sync(dev);
1338         if (ret < 0) {
1339                 dev_err(dev, "pm runtime failed, e = %d\n", ret);
1340                 return ret;
1341         }
1342         tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1343         pm_runtime_put(dev);
1344
1345         return spi_master_resume(master);
1346 }
1347 #endif
1348
1349 static int tegra_spi_runtime_suspend(struct device *dev)
1350 {
1351         struct spi_master *master = dev_get_drvdata(dev);
1352         struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1353
1354         /* Flush all write which are in PPSB queue by reading back */
1355         tegra_spi_readl(tspi, SPI_COMMAND1);
1356
1357         clk_disable_unprepare(tspi->clk);
1358         return 0;
1359 }
1360
1361 static int tegra_spi_runtime_resume(struct device *dev)
1362 {
1363         struct spi_master *master = dev_get_drvdata(dev);
1364         struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1365         int ret;
1366
1367         ret = clk_prepare_enable(tspi->clk);
1368         if (ret < 0) {
1369                 dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1370                 return ret;
1371         }
1372         return 0;
1373 }
1374
/* Runtime PM gates the clock; system sleep goes through the SPI core. */
static const struct dev_pm_ops tegra_spi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
		tegra_spi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
};

static struct platform_driver tegra_spi_driver = {
	.driver = {
		.name		= "spi-tegra114",
		.pm		= &tegra_spi_pm_ops,
		.of_match_table	= tegra_spi_of_match,
	},
	.probe =	tegra_spi_probe,
	.remove =	tegra_spi_remove,
};
module_platform_driver(tegra_spi_driver);

MODULE_ALIAS("platform:spi-tegra114");
MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");