drivers/misc/cxl/guest.c
/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT        1
#define CXL_SLOT_RESET_EVENT            2
#define CXL_RESUME_EVENT                3

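/*
 * Notify drivers bound to the AFU's virtual PHB of a PCI channel event,
 * invoking their error_detected/slot_reset/resume callbacks as appropriate.
 */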
static void pci_error_handlers(struct cxl_afu *afu,
                                int bus_error_event,
                                pci_channel_state_t state)
{
        struct pci_dev *afu_dev;

        if (afu->phb == NULL)
                return;

        list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                if (!afu_dev->driver)
                        continue;

                switch (bus_error_event) {
                case CXL_ERROR_DETECTED_EVENT:
                        afu_dev->error_state = state;

                        if (afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->error_detected)
                                afu_dev->driver->err_handler->error_detected(afu_dev, state);
                        break;
                case CXL_SLOT_RESET_EVENT:
                        afu_dev->error_state = state;

                        if (afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->slot_reset)
                                afu_dev->driver->err_handler->slot_reset(afu_dev);
                        break;
                case CXL_RESUME_EVENT:
                        if (afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->resume)
                                afu_dev->driver->err_handler->resume(afu_dev);
                        break;
                }
        }
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
                                        u64 errstat)
{
        pr_devel("in %s\n", __func__);
        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

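/*
 * Collect VPD for the adapter (if 'adapter' is non-NULL) or for an AFU.
 * The hcall fills a scatter/gather list of zeroed pages, SG_BUFFER_SIZE
 * bytes each, capped at SG_MAX_ENTRIES entries, and reports in 'out' how
 * much VPD data is available.
 */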
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
                        void *buf, size_t len)
{
        unsigned int entries, mod;
        unsigned long **vpd_buf = NULL;
        struct sg_list *le;
        int rc = 0, i, tocopy;
        u64 out = 0;

        if (buf == NULL)
                return -EINVAL;

        /* number of entries in the scatter/gather list */
        entries = len / SG_BUFFER_SIZE;
        mod = len % SG_BUFFER_SIZE;
        if (mod)
                entries++;

        if (entries > SG_MAX_ENTRIES) {
                entries = SG_MAX_ENTRIES;
                len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
                mod = 0;
        }

        vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
        if (!vpd_buf)
                return -ENOMEM;

        le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
        if (!le) {
                rc = -ENOMEM;
                goto err1;
        }

        for (i = 0; i < entries; i++) {
                vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (!vpd_buf[i]) {
                        rc = -ENOMEM;
                        goto err2;
                }
                le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
                le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
                if ((i == (entries - 1)) && mod)
                        le[i].len = cpu_to_be64(mod);
        }

        if (adapter)
                rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
                                        virt_to_phys(le), entries, &out);
        else
                rc = cxl_h_collect_vpd(afu->guest->handle, 0,
                                virt_to_phys(le), entries, &out);
        pr_devel("length of available vpd (entries: %i): %#llx\n",
                entries, out);

        if (!rc) {
                /*
                 * The hcall returns the size of the available VPD data in
                 * 'out' and fills the buffer with as much of it as fits.
                 */
                if (out < len)
                        len = out;
                rc = len;
                if (out) {
                        for (i = 0; i < entries; i++) {
                                if (len < SG_BUFFER_SIZE)
                                        tocopy = len;
                                else
                                        tocopy = SG_BUFFER_SIZE;
                                memcpy(buf, vpd_buf[i], tocopy);
                                buf += tocopy;
                                len -= tocopy;
                        }
                }
        }
err2:
        for (i = 0; i < entries; i++) {
                if (vpd_buf[i])
                        free_page((unsigned long) vpd_buf[i]);
        }
        free_page((unsigned long) le);
err1:
        kfree(vpd_buf);
        return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
        return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
        struct cxl_context *ctx = data;
        struct cxl_irq_info irq_info;
        int rc;

        pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
        rc = guest_get_irq_info(ctx, &irq_info);
        if (rc) {
                WARN(1, "Unable to get IRQ info: %i\n", rc);
                return IRQ_HANDLED;
        }

        rc = cxl_irq_psl8(irq, ctx, &irq_info);
        return rc;
}

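/*
 * Query the hypervisor for the AFU error state. Only the four H_STATE_*
 * values are expected; WARN on anything else but still return the state
 * to the caller.
 */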
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
        u64 state;
        int rc = 0;

        if (!afu)
                return -EIO;

        rc = cxl_h_read_error_state(afu->guest->handle, &state);
        if (!rc) {
                WARN_ON(state != H_STATE_NORMAL &&
                        state != H_STATE_DISABLE &&
                        state != H_STATE_TEMP_UNAVAILABLE &&
                        state != H_STATE_PERM_UNAVAILABLE);
                *state_out = state & 0xffffffff;
        }
        return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        int rc;
        u64 serr, afu_error, dsisr;

        rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
        if (rc) {
                dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
                return IRQ_HANDLED;
        }
        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_afu_decode_psl_serr(afu, serr);
        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

        rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
        if (rc)
                dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
                        rc);

        return IRQ_HANDLED;
}

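/*
 * Search the adapter's IRQ ranges for a contiguous run of 'len' free
 * hardware IRQs. On success, mark them used in the bitmap and return the
 * first IRQ number through 'irq'. Callers hold irq_alloc_lock.
 */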
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
        int i, n;
        struct irq_avail *cur;

        for (i = 0; i < adapter->guest->irq_nranges; i++) {
                cur = &adapter->guest->irq_avail[i];
                n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
                                        0, len, 0);
                if (n < cur->range) {
                        bitmap_set(cur->bitmap, n, len);
                        *irq = cur->offset + n;
                        pr_devel("guest: allocate IRQs %#x->%#x\n",
                                *irq, *irq + len - 1);

                        return 0;
                }
        }
        return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
        int i, n;
        struct irq_avail *cur;

        if (len == 0)
                return -ENOENT;

        for (i = 0; i < adapter->guest->irq_nranges; i++) {
                cur = &adapter->guest->irq_avail[i];
                if (irq >= cur->offset &&
                        (irq + len) <= (cur->offset + cur->range)) {
                        n = irq - cur->offset;
                        bitmap_clear(cur->bitmap, n, len);
                        pr_devel("guest: release IRQs %#x->%#x\n",
                                irq, irq + len - 1);
                        return 0;
                }
        }
        return -ENOENT;
}

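/*
 * Full adapter reset: freeze and detach every AFU context, reset the
 * adapter through the hypervisor, then let drivers on each vPHB recover.
 */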
static int guest_reset(struct cxl *adapter)
{
        struct cxl_afu *afu = NULL;
        int i, rc;

        pr_devel("Adapter reset request\n");
        for (i = 0; i < adapter->slices; i++) {
                if ((afu = adapter->afu[i])) {
                        pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                                        pci_channel_io_frozen);
                        cxl_context_detach_all(afu);
                }
        }

        rc = cxl_h_reset_adapter(adapter->guest->handle);
        for (i = 0; i < adapter->slices; i++) {
                if (!rc && (afu = adapter->afu[i])) {
                        pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
                                        pci_channel_io_normal);
                        pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
                }
        }
        return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
        int irq;

        spin_lock(&adapter->guest->irq_alloc_lock);
        if (irq_alloc_range(adapter, 1, &irq))
                irq = -ENOSPC;
        spin_unlock(&adapter->guest->irq_alloc_lock);
        return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
        spin_lock(&adapter->guest->irq_alloc_lock);
        irq_free_range(adapter, irq, 1);
        spin_unlock(&adapter->guest->irq_alloc_lock);
}

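/*
 * Allocate 'num' IRQs spread across at most CXL_IRQ_RANGES ranges. If a
 * contiguous block of the requested size is unavailable, retry with
 * successively halved block sizes before giving up.
 */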
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
                                struct cxl *adapter, unsigned int num)
{
        int i, try, irq;

        memset(irqs, 0, sizeof(struct cxl_irq_ranges));

        spin_lock(&adapter->guest->irq_alloc_lock);
        for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
                try = num;
                while (try) {
                        if (irq_alloc_range(adapter, try, &irq) == 0)
                                break;
                        try /= 2;
                }
                if (!try)
                        goto error;
                irqs->offset[i] = irq;
                irqs->range[i] = try;
                num -= try;
        }
        if (num)
                goto error;
        spin_unlock(&adapter->guest->irq_alloc_lock);
        return 0;

error:
        for (i = 0; i < CXL_IRQ_RANGES; i++)
                irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
        spin_unlock(&adapter->guest->irq_alloc_lock);
        return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
                                struct cxl *adapter)
{
        int i;

        spin_lock(&adapter->guest->irq_alloc_lock);
        for (i = 0; i < CXL_IRQ_RANGES; i++)
                irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
        spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
                                 guest_slice_irq_err, afu, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
}

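/*
 * Acknowledge an interrupt through the control-faults hcall, which takes
 * the upper 32 bits of the TFC value and a flag indicating whether a PSL
 * reset is requested (psl_reset_mask != 0).
 */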
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
                                tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        disable_irq(virq);
                }
        }
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        enable_irq(virq);
                }
        }
}

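/*
 * Read 'sz' bytes from an AFU configuration record. The hypervisor copies
 * the data into a bounce page; values are stored little-endian (hence the
 * in_leXX() accessors below).
 */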
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
                        u64 offset, u64 *val)
{
        unsigned long cr;
        char c;
        int rc = 0;

        if (afu->crs_len < sz)
                return -ENOENT;

        if (unlikely(offset >= afu->crs_len))
                return -ERANGE;

        cr = get_zeroed_page(GFP_KERNEL);
        if (!cr)
                return -ENOMEM;

        rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
                        virt_to_phys((void *)cr), sz);
        if (rc)
                goto err;

        switch (sz) {
        case 1:
                c = *((char *) cr);
                *val = c;
                break;
        case 2:
                *val = in_le16((u16 *)cr);
                break;
        case 4:
                *val = in_le32((unsigned *)cr);
                break;
        case 8:
                *val = in_le64((u64 *)cr);
                break;
        default:
                WARN_ON(1);
        }
err:
        free_page(cr);
        return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
                        u32 *out)
{
        int rc;
        u64 val;

        rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
        if (!rc)
                *out = (u32) val;
        return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
                        u16 *out)
{
        int rc;
        u64 val;

        rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
        if (!rc)
                *out = (u16) val;
        return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
                        u8 *out)
{
        int rc;
        u64 val;

        rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
        if (!rc)
                *out = (u8) val;
        return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
                        u64 *out)
{
        return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        /* config record is not writable from guest */
        return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        /* config record is not writable from guest */
        return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        /* config record is not writable from guest */
        return -EPERM;
}

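/*
 * Attach a context in AFU-directed mode: build a process element in a
 * freshly zeroed page, hand it to the hypervisor, then record the process
 * token and MMIO space it returns.
 */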
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_process_element_hcall *elem;
        struct cxl *adapter = ctx->afu->adapter;
        const struct cred *cred;
        u32 pid, idx;
        int rc, r, i;
        u64 mmio_addr, mmio_size;
        __be64 flags = 0;

        /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
        if (!(elem = (struct cxl_process_element_hcall *)
                        get_zeroed_page(GFP_KERNEL)))
                return -ENOMEM;

        elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
        if (ctx->kernel) {
                pid = 0;
                flags |= CXL_PE_TRANSLATION_ENABLED;
                flags |= CXL_PE_PRIVILEGED_PROCESS;
                if (mfmsr() & MSR_SF)
                        flags |= CXL_PE_64_BIT;
        } else {
                pid = current->pid;
                flags |= CXL_PE_PROBLEM_STATE;
                flags |= CXL_PE_TRANSLATION_ENABLED;
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        flags |= CXL_PE_64_BIT;
                cred = get_current_cred();
                if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
                        flags |= CXL_PE_PRIVILEGED_PROCESS;
                put_cred(cred);
        }
        elem->flags         = cpu_to_be64(flags);
        elem->common.tid    = cpu_to_be32(0); /* Unused */
        elem->common.pid    = cpu_to_be32(pid);
        elem->common.csrp   = cpu_to_be64(0); /* disable */
        elem->common.u.psl8.aurp0  = cpu_to_be64(0); /* disable */
        elem->common.u.psl8.aurp1  = cpu_to_be64(0); /* disable */

        cxl_prefault(ctx, wed);

        elem->common.u.psl8.sstp0  = cpu_to_be64(ctx->sstp0);
        elem->common.u.psl8.sstp1  = cpu_to_be64(ctx->sstp1);

        /*
         * Ensure we have at least one interrupt allocated to take faults for
         * kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                rc = afu_register_irqs(ctx, 0);
                if (rc)
                        goto out_free;
        }

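        /*
         * The first interrupt of the first range is the PSL interrupt;
         * every other AFU interrupt is flagged in the virtual ISN bitmap,
         * indexed relative to the adapter's irq_base_offset.
         */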
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                for (i = 0; i < ctx->irqs.range[r]; i++) {
                        if (r == 0 && i == 0) {
                                elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
                        } else {
                                idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
                                elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
                        }
                }
        }
        elem->common.amr = cpu_to_be64(amr);
        elem->common.wed = cpu_to_be64(wed);

        disable_afu_irqs(ctx);

        rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
                                &ctx->process_token, &mmio_addr, &mmio_size);
        if (rc == H_SUCCESS) {
                if (ctx->master || !ctx->afu->pp_psa) {
                        ctx->psn_phys = ctx->afu->psn_phys;
                        ctx->psn_size = ctx->afu->adapter->ps_size;
                } else {
                        ctx->psn_phys = mmio_addr;
                        ctx->psn_size = mmio_size;
                }
                if (ctx->afu->pp_psa && mmio_size &&
                        ctx->afu->pp_size == 0) {
                        /*
                         * There is no device-tree property to read pp_size
                         * from; we only learn it at the first attach. Unlike
                         * bare-metal, that is too late and we should really
                         * take a lock here. However, on PowerVM, pp_size is
                         * only used for display in /sys. A better interface
                         * is being discussed with pHyp for their next
                         * release.
                         */
                        ctx->afu->pp_size = mmio_size;
                }
                /* from PAPR: process element is bytes 4-7 of process token */
                ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
                pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
                        ctx->pe, ctx->external_pe, ctx->psn_size);
                ctx->pe_inserted = true;
                enable_afu_irqs(ctx);
        }

out_free:
        free_page((u64)elem);
        return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
        pr_devel("in %s\n", __func__);

        ctx->kernel = kernel;
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return attach_afu_directed(ctx, wed, amr);

        /* dedicated mode not supported on FW840 */

        return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
                return -1;
        return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
        pr_devel("in %s\n", __func__);
        trace_cxl_detach(ctx);

        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                return -EIO;

        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return detach_afu_directed(ctx);

        return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
        struct cxl_afu *afu = to_cxl_afu(dev);

        pr_devel("%s\n", __func__);

        idr_destroy(&afu->contexts_idr);

        kfree(afu->guest);
        kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
        return guest_collect_vpd(NULL, afu, buf, len);
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
                                        loff_t off, size_t count)
{
        void *tbuf = NULL;
        int rc = 0;

        tbuf = (void *) get_zeroed_page(GFP_KERNEL);
        if (!tbuf)
                return -ENOMEM;

        rc = cxl_h_get_afu_err(afu->guest->handle,
                               off & 0x7,
                               virt_to_phys(tbuf),
                               count);
        if (rc)
                goto err;

        if (count > ERR_BUFF_MAX_COPY_SIZE)
                count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
        memcpy(buf, tbuf, count);
err:
        free_page((u64)tbuf);

        return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
        return 0;
}

static bool guest_support_attributes(const char *attr_name,
                                     enum cxl_attrs type)
{
        switch (type) {
        case CXL_ADAPTER_ATTRS:
                if ((strcmp(attr_name, "base_image") == 0) ||
                        (strcmp(attr_name, "load_image_on_perst") == 0) ||
                        (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
                        (strcmp(attr_name, "image_loaded") == 0))
                        return false;
                break;
        case CXL_AFU_MASTER_ATTRS:
                if ((strcmp(attr_name, "pp_mmio_off") == 0))
                        return false;
                break;
        case CXL_AFU_ATTRS:
                break;
        default:
                break;
        }

        return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

        afu->current_mode = CXL_MODE_DIRECTED;

        afu->num_procs = afu->max_procs_virtualised;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);

        if (mode == CXL_MODE_DEDICATED)
                dev_err(&afu->dev, "Dedicated mode not supported\n");

        return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        cxl_ops->afu_reset(afu);

        return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
        pr_devel("AFU(%d) reset request\n", afu->slice);
        return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
        if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
                dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
                        afu->slice);
                return -ENOMEM;
        }
        return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
        if (afu->p2n_mmio)
                iounmap(afu->p2n_mmio);
}

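/*
 * Poll the AFU error state and drive recovery: on H_STATE_DISABLE, detach
 * all contexts, reset the AFU and, if it comes back to normal, run the
 * slot_reset/resume handlers; on H_STATE_PERM_UNAVAILABLE, report a
 * permanent failure to the drivers.
 */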
static int afu_update_state(struct cxl_afu *afu)
{
        int rc, cur_state;

        rc = afu_read_error_state(afu, &cur_state);
        if (rc)
                return rc;

        if (afu->guest->previous_state == cur_state)
                return 0;

        pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

        switch (cur_state) {
        case H_STATE_NORMAL:
                afu->guest->previous_state = cur_state;
                break;

        case H_STATE_DISABLE:
                pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                                pci_channel_io_frozen);

                cxl_context_detach_all(afu);
                if ((rc = cxl_ops->afu_reset(afu)))
                        pr_devel("reset hcall failed %d\n", rc);

                rc = afu_read_error_state(afu, &cur_state);
                if (!rc && cur_state == H_STATE_NORMAL) {
                        pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
                                        pci_channel_io_normal);
                        pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
                }
                afu->guest->previous_state = 0;
                break;

        case H_STATE_TEMP_UNAVAILABLE:
                afu->guest->previous_state = cur_state;
                break;

        case H_STATE_PERM_UNAVAILABLE:
                dev_err(&afu->dev, "AFU is in permanent error state\n");
                pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                                pci_channel_io_perm_failure);
                afu->guest->previous_state = cur_state;
                break;

        default:
                pr_err("Unexpected AFU(%d) error state: %#x\n",
                       afu->slice, cur_state);
                return -EINVAL;
        }

        return rc;
}

static void afu_handle_errstate(struct work_struct *work)
{
        struct cxl_afu_guest *afu_guest =
                container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

        if (!afu_update_state(afu_guest->parent) &&
            afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
                return;

        if (afu_guest->handle_err)
                schedule_delayed_work(&afu_guest->work_err,
                                      msecs_to_jiffies(3000));
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
        int state;

        if (afu && (!afu_read_error_state(afu, &state))) {
                if (state == H_STATE_NORMAL)
                        return true;
        }

        return false;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
        if (afu->pp_irqs < 0) {
                dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
                return -EINVAL;
        }

        if (afu->max_procs_virtualised < 1) {
                dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
                return -EINVAL;
        }

        if (afu->crs_len < 0) {
                dev_err(&afu->dev, "Unexpected configuration record size value\n");
                return -EINVAL;
        }

        return 0;
}

int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
        struct cxl_afu *afu;
        bool free = true;
        int rc;

        pr_devel("in %s - AFU(%d)\n", __func__, slice);
        if (!(afu = cxl_alloc_afu(adapter, slice)))
                return -ENOMEM;

        if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
                kfree(afu);
                return -ENOMEM;
        }

        if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
                                          adapter->adapter_num,
                                          slice)))
                goto err1;

        adapter->slices++;

        if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
                goto err1;

        if ((rc = cxl_ops->afu_reset(afu)))
                goto err1;

        if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
                goto err1;

        if ((rc = afu_properties_look_ok(afu)))
                goto err1;

        if ((rc = guest_map_slice_regs(afu)))
                goto err1;

        if ((rc = guest_register_serr_irq(afu)))
                goto err2;

        /*
         * After we call this function we must not free the afu directly, even
         * if it returns an error!
         */
        if ((rc = cxl_register_afu(afu)))
                goto err_put1;

        if ((rc = cxl_sysfs_afu_add(afu)))
                goto err_put1;

        /*
         * pHyp doesn't expose the programming models supported by the AFU,
         * and currently only supports directed mode. If it adds dedicated
         * mode later, this version of cxl has no way to detect it, so we
         * would initialize the driver but the first attach would fail.
         * A better interface (likely a new property) is being discussed
         * with pHyp.
         */
        if (afu->max_procs_virtualised == 1)
                afu->modes_supported = CXL_MODE_DEDICATED;
        else
                afu->modes_supported = CXL_MODE_DIRECTED;

        if ((rc = cxl_afu_select_best_mode(afu)))
                goto err_put2;

        adapter->afu[afu->slice] = afu;

        afu->enabled = true;

        /*
         * Schedule periodic polling of the AFU error state, using the
         * 'parent' pointer stored in the guest structure.
         */
        afu->guest->parent = afu;
        afu->guest->handle_err = true;
        INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
        schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

        if ((rc = cxl_pci_vphb_add(afu)))
                dev_info(&afu->dev, "Can't register vPHB\n");

        return 0;

err_put2:
        cxl_sysfs_afu_remove(afu);
err_put1:
        device_unregister(&afu->dev);
        free = false;
        guest_release_serr_irq(afu);
err2:
        guest_unmap_slice_regs(afu);
err1:
        if (free) {
                kfree(afu->guest);
                kfree(afu);
        }
        return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
        if (!afu)
                return;

        pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

        /* flush and stop the pending error-state polling job */
        afu->guest->handle_err = false;
        flush_delayed_work(&afu->guest->work_err);

        cxl_pci_vphb_remove(afu);
        cxl_sysfs_afu_remove(afu);

        spin_lock(&afu->adapter->afu_list_lock);
        afu->adapter->afu[afu->slice] = NULL;
        spin_unlock(&afu->adapter->afu_list_lock);

        cxl_context_detach_all(afu);
        cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
        guest_release_serr_irq(afu);
        guest_unmap_slice_regs(afu);

        device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
        struct irq_avail *cur;
        int i;

        if (adapter->guest) {
                if (adapter->guest->irq_avail) {
                        for (i = 0; i < adapter->guest->irq_nranges; i++) {
                                cur = &adapter->guest->irq_avail[i];
                                kfree(cur->bitmap);
                        }
                        kfree(adapter->guest->irq_avail);
                }
                kfree(adapter->guest->status);
                kfree(adapter->guest);
        }
        cxl_remove_adapter_nr(adapter);
        kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
        /*
         * The absence of this property means that the operational status is
         * unknown or okay.
         */
        if (strlen(adapter->guest->status) &&
            strcmp(adapter->guest->status, "okay")) {
                pr_err("ABORTING: Bad operational status of the device\n");
                return -EINVAL;
        }

        return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
        return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
        pr_devel("in %s\n", __func__);

        cxl_sysfs_adapter_remove(adapter);

        cxl_guest_remove_chardev(adapter);
        device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
        free_adapter(to_cxl_adapter(dev));
}

struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
        struct cxl *adapter;
        bool free = true;
        int rc;

        if (!(adapter = cxl_alloc_adapter()))
                return ERR_PTR(-ENOMEM);

        if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
                free_adapter(adapter);
                return ERR_PTR(-ENOMEM);
        }

        adapter->slices = 0;
        adapter->guest->pdev = pdev;
        adapter->dev.parent = &pdev->dev;
        adapter->dev.release = release_adapter;
        dev_set_drvdata(&pdev->dev, adapter);

        /*
         * Hypervisor controls PSL timebase initialization (p1 register).
         * On FW840, PSL is initialized.
         */
        adapter->psl_timebase_synced = true;

        if ((rc = cxl_of_read_adapter_handle(adapter, np)))
                goto err1;

        if ((rc = cxl_of_read_adapter_properties(adapter, np)))
                goto err1;

        if ((rc = properties_look_ok(adapter)))
                goto err1;

        if ((rc = cxl_guest_add_chardev(adapter)))
                goto err1;

        /*
         * After we call this function we must not free the adapter directly,
         * even if it returns an error!
         */
        if ((rc = cxl_register_adapter(adapter)))
                goto err_put1;

        if ((rc = cxl_sysfs_adapter_add(adapter)))
                goto err_put1;

        /* release the context lock as the adapter is configured */
        cxl_adapter_context_unlock(adapter);

        return adapter;

err_put1:
        device_unregister(&adapter->dev);
        free = false;
        cxl_guest_remove_chardev(adapter);
err1:
        if (free)
                free_adapter(adapter);
        return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
        struct platform_device *pdev;

        pdev = adapter->guest->pdev;
        cxl_guest_remove_adapter(adapter);

        cxl_of_probe(pdev);
}

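/*
 * Backend ops used when cxl runs as a PowerVM guest. Operations left
 * NULL (setup_irq, update_ivtes) have no guest-side implementation.
 */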
const struct cxl_backend_ops cxl_guest_ops = {
        .module = THIS_MODULE,
        .adapter_reset = guest_reset,
        .alloc_one_irq = guest_alloc_one_irq,
        .release_one_irq = guest_release_one_irq,
        .alloc_irq_ranges = guest_alloc_irq_ranges,
        .release_irq_ranges = guest_release_irq_ranges,
        .setup_irq = NULL,
        .handle_psl_slice_error = guest_handle_psl_slice_error,
        .psl_interrupt = guest_psl_irq,
        .ack_irq = guest_ack_irq,
        .attach_process = guest_attach_process,
        .detach_process = guest_detach_process,
        .update_ivtes = NULL,
        .support_attributes = guest_support_attributes,
        .link_ok = guest_link_ok,
        .release_afu = guest_release_afu,
        .afu_read_err_buffer = guest_afu_read_err_buffer,
        .afu_check_and_enable = guest_afu_check_and_enable,
        .afu_activate_mode = guest_afu_activate_mode,
        .afu_deactivate_mode = guest_afu_deactivate_mode,
        .afu_reset = guest_afu_reset,
        .afu_cr_read8 = guest_afu_cr_read8,
        .afu_cr_read16 = guest_afu_cr_read16,
        .afu_cr_read32 = guest_afu_cr_read32,
        .afu_cr_read64 = guest_afu_cr_read64,
        .afu_cr_write8 = guest_afu_cr_write8,
        .afu_cr_write16 = guest_afu_cr_write16,
        .afu_cr_write32 = guest_afu_cr_write32,
        .read_adapter_vpd = cxl_guest_read_adapter_vpd,
};
1207 };