/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

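/* Budget for a single mlx4_cq_tasklet_cb() invocation: completion
 * handlers run for at most TASKLET_MAX_TIME msec before the remainder
 * of the list is deferred to a rescheduled tasklet.
 */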
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
void mlx4_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
				 tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		/* Drop the reference taken in mlx4_add_cq_to_tasklet() */
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

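/* Queue a CQ on its EQ's tasklet list. Each queued CQ holds an extra
 * reference, dropped by mlx4_cq_tasklet_cb() after its completion
 * handler has run; mlx4_cq_free() waits on &cq->free, so a CQ cannot
 * be freed while it is still queued.
 */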
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When migration of CQs between EQs is implemented, this point
	 * will need to be synchronized: while a CQ is being migrated,
	 * completions could still arrive on the old EQ.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		kick = list_empty(&tasklet_ctx->list);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
		if (kick)
			tasklet_schedule(&tasklet_ctx->task);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}

void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}

static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

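/* Illustrative sketch (not part of this file): a consumer tuning
 * interrupt moderation might cap completion events at one per 16
 * completions or one moderation period, whichever comes first.
 * "my_cq" is a hypothetical, already-allocated CQ; count and period
 * are simply programmed into cq_max_count/cq_period above, in units
 * defined by the caller's coalescing policy:
 *
 *	int err = mlx4_cq_modify(dev, &my_cq, 16, 16);
 *
 *	if (err)
 *		mlx4_warn(dev, "CQ moderation update failed (%d)\n", err);
 */
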
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

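/* On multi-function (SR-IOV) devices CQ numbers are owned by the PF:
 * the allocation is proxied through an ALLOC_RES command and the CQN
 * is returned in the immediate out parameter. Otherwise allocate
 * directly from the local bitmap and ICM tables.
 */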
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;
		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

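/* Illustrative sketch (not part of this file): the typical pairing a
 * consumer such as mlx4_en uses, assuming MTT, UAR and doorbell-record
 * state ("wqres", "priv_uar" - hypothetical names) was set up
 * beforehand; the trailing 0, 0 are collapsed and timestamp_en:
 *
 *	struct mlx4_cq mcq;
 *	int err;
 *
 *	err = mlx4_cq_alloc(dev, nent, &wqres.mtt, &priv_uar,
 *			    wqres.db.dma, &mcq, vector, 0, 0);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_cq_free(dev, &mcq);
 */
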
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}