/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;
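
/*
 * All rdmavt CQ completion callbacks are dispatched from this single
 * module-wide workqueue: rvt_cq_enter() queues each CQ's work item
 * with queue_work_on() bound to cq->comp_vector_cpu, so the handler
 * runs on the CPU chosen for that CQ's completion vector.
 */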

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 */
void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct rvt_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	trace_rvt_cq_enter(cq, entry, head);
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else {
		wc->kqueue[head] = *entry;
	}
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}
EXPORT_SYMBOL(rvt_cq_enter);
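
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * a driver's send-completion path typically fills a struct ib_wc and
 * posts it to the QP's send CQ, roughly:
 *
 *	struct ib_wc wc;
 *
 *	memset(&wc, 0, sizeof(wc));
 *	wc.wr_id = wqe->wr.wr_id;	(wqe: a hypothetical send WQE)
 *	wc.qp = &qp->ibqp;
 *	wc.opcode = IB_WC_SEND;
 *	wc.status = IB_WC_SUCCESS;
 *	wc.byte_len = wqe->length;
 *	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
 *		     !!(wqe->wr.send_flags & IB_SEND_SOLICITED));
 *
 * The real callers live in rdmavt's qp.c send/receive completion
 * helpers, which add further per-QP bookkeeping.
 */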

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: pointer to the completion queue or an ERR_PTR with a
 * negative errno on failure.
 */
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq;
	struct rvt_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return ERR_PTR(-EINVAL);

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/* Allocate the completion queue structure. */
	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = udata ?
		vmalloc_user(sz) :
		vzalloc_node(sz, rdi->dparms.node);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	cq->queue = wc;

	ret = &cq->ibcq;

	trace_rvt_create_cq(cq, attr);
	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
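
/*
 * Illustrative kernel-side usage (an assumption, not code from this
 * file): a ULP reaches rvt_create_cq() through the generic verbs
 * layer, roughly:
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe = 256,			(example size)
 *		.comp_vector = 0,
 *	};
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, my_event_handler,
 *			  my_context, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * my_comp_handler, my_event_handler and my_context are placeholders
 * for the caller's callbacks and cookie.
 */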

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 *
 * Return: always 0
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new size of the CQ
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *old_wc;
	struct rvt_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = udata ?
		vmalloc_user(sz) :
		vzalloc_node(sz, rdi->dparms.node);
	if (!wc)
		return -ENOMEM;

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
	return ret;
}

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
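
/*
 * Illustrative consumer sketch (an assumption, not code from this
 * file): a kernel ULP typically drains the CQ, rearms notification,
 * and re-polls to close the race with completions that arrived before
 * the rearm:
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);		(handle_wc: hypothetical)
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * For rdmavt-based devices ib_poll_cq() and ib_req_notify_cq() resolve
 * to rvt_poll_cq() and rvt_req_notify_cq() above.
 */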

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}
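
/*
 * Illustrative pairing (an assumption about vt.c, not code from this
 * file): rdmavt's module init/exit call these once per module load,
 * roughly:
 *
 *	static int __init rvt_init(void)
 *	{
 *		int ret = rvt_driver_cq_init();
 *
 *		if (ret)
 *			pr_err("Error in driver CQ init.\n");
 *		return ret;
 *	}
 *
 *	static void __exit rvt_cleanup(void)
 *	{
 *		rvt_cq_exit();
 *	}
 */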