1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/io-64-nonatomic-lo-hi.h>
16 #include <linux/prefetch.h>
18 #include "vxge-traffic.h"
19 #include "vxge-config.h"
20 #include "vxge-main.h"
23 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
24 * @vp: Virtual Path handle.
26 * Enable vpath interrupts. The function is to be executed the last in
27 * vpath initialization sequence.
29 * See also: vxge_hw_vpath_intr_disable()
31 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
35 struct __vxge_hw_virtualpath *vpath;
36 struct vxge_hw_vpath_reg __iomem *vp_reg;
37 enum vxge_hw_status status = VXGE_HW_OK;
39 status = VXGE_HW_ERR_INVALID_HANDLE;
45 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
46 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
50 vp_reg = vpath->vp_reg;
52 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
54 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
55 &vp_reg->general_errors_reg);
57 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
58 &vp_reg->pci_config_errors_reg);
60 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
61 &vp_reg->mrpcim_to_vpath_alarm_reg);
63 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
64 &vp_reg->srpcim_to_vpath_alarm_reg);
66 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
67 &vp_reg->vpath_ppif_int_status);
69 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
70 &vp_reg->srpcim_msg_to_vpath_reg);
72 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
73 &vp_reg->vpath_pcipif_int_status);
75 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
76 &vp_reg->prc_alarm_reg);
78 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
79 &vp_reg->wrdma_alarm_status);
81 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
82 &vp_reg->asic_ntwk_vp_err_reg);
84 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
85 &vp_reg->xgmac_vp_int_status);
87 val64 = readq(&vp_reg->vpath_general_int_status);
89 /* Mask unwanted interrupts */
91 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
92 &vp_reg->vpath_pcipif_int_mask);
94 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
95 &vp_reg->srpcim_msg_to_vpath_mask);
97 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
98 &vp_reg->srpcim_to_vpath_alarm_mask);
100 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
101 &vp_reg->mrpcim_to_vpath_alarm_mask);
103 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
104 &vp_reg->pci_config_errors_mask);
106 /* Unmask the individual interrupts */
108 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
111 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
112 &vp_reg->general_errors_mask);
114 __vxge_hw_pio_mem_write32_upper(
115 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
120 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
121 &vp_reg->kdfcctl_errors_mask);
123 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
125 __vxge_hw_pio_mem_write32_upper(
126 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
127 &vp_reg->prc_alarm_mask);
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
130 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
132 if (vpath->hldev->first_vp_id != vpath->vp_id)
133 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
134 &vp_reg->asic_ntwk_vp_err_mask);
136 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
138 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
139 &vp_reg->asic_ntwk_vp_err_mask);
141 __vxge_hw_pio_mem_write32_upper(0,
142 &vp_reg->vpath_general_int_mask);
149 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
150 * @vp: Virtual Path handle.
152 * Disable vpath interrupts. The function is to be executed before
153 * resetting or closing the vpath.
155 * See also: vxge_hw_vpath_intr_enable()
157 enum vxge_hw_status vxge_hw_vpath_intr_disable(
158 struct __vxge_hw_vpath_handle *vp)
162 struct __vxge_hw_virtualpath *vpath;
163 enum vxge_hw_status status = VXGE_HW_OK;
164 struct vxge_hw_vpath_reg __iomem *vp_reg;
166 status = VXGE_HW_ERR_INVALID_HANDLE;
172 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
173 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
176 vp_reg = vpath->vp_reg;
178 __vxge_hw_pio_mem_write32_upper(
179 (u32)VXGE_HW_INTR_MASK_ALL,
180 &vp_reg->vpath_general_int_mask);
182 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
184 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
186 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
187 &vp_reg->general_errors_mask);
189 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
190 &vp_reg->pci_config_errors_mask);
192 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
193 &vp_reg->mrpcim_to_vpath_alarm_mask);
195 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
196 &vp_reg->srpcim_to_vpath_alarm_mask);
198 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
199 &vp_reg->vpath_ppif_int_mask);
201 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
202 &vp_reg->srpcim_msg_to_vpath_mask);
204 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
205 &vp_reg->vpath_pcipif_int_mask);
207 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
208 &vp_reg->wrdma_alarm_mask);
210 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
211 &vp_reg->prc_alarm_mask);
213 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
214 &vp_reg->xgmac_vp_int_mask);
216 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
217 &vp_reg->asic_ntwk_vp_err_mask);
223 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
225 struct vxge_hw_vpath_reg __iomem *vp_reg;
226 struct vxge_hw_vp_config *config;
229 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
232 vp_reg = fifo->vp_reg;
233 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
235 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
236 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
237 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
238 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
239 fifo->tim_tti_cfg1_saved = val64;
240 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
244 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
246 u64 val64 = ring->tim_rti_cfg1_saved;
248 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
249 ring->tim_rti_cfg1_saved = val64;
250 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
253 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
255 u64 val64 = fifo->tim_tti_cfg3_saved;
256 u64 timer = (fifo->rtimer * 1000) / 272;
258 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
260 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
261 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
263 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
264 /* tti_cfg3_saved is not updated again because it is
265 * initialized at one place only - init time.
269 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
271 u64 val64 = ring->tim_rti_cfg3_saved;
272 u64 timer = (ring->rtimer * 1000) / 272;
274 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
276 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
277 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
279 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
280 /* rti_cfg3_saved is not updated again because it is
281 * initialized at one place only - init time.
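
/*
 * Worked example of the restriction-timer conversion used by the two
 * helpers above. This is only a sketch of the visible arithmetic; it
 * assumes @rtimer is expressed in microseconds and that the TIM
 * restriction timer ticks every 272 ns, which is what the
 * (rtimer * 1000) / 272 scaling implies:
 *
 *	u64 rtimer = 35;			// requested 35 us
 *	u64 timer  = (rtimer * 1000) / 272;	// 35000 ns / 272 ns = 128 ticks
 *
 *	// The result is then folded into TIM_CFG3 via
 *	// VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer), after clearing the
 *	// old 26-bit field (0x3ffffff mask) and before the register write.
 */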
286 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
287 * @channel: Channel for rx or tx handle
290 * The function masks the msix interrupt for the given msix_id
294 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
297 __vxge_hw_pio_mem_write32_upper(
298 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
299 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
303 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
304 * @channel: Channel for rx or tx handle
307 * The function unmasks the msix interrupt for the given msix_id
312 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
315 __vxge_hw_pio_mem_write32_upper(
316 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
317 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
321 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
322 * @channel: Channel for rx or tx handle
325 * The function clears the msix interrupt for the given msix_id
326 * when the channel is configured in MSIX one-shot mode
330 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
332 __vxge_hw_pio_mem_write32_upper(
333 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
334 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
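
/*
 * Sketch of how an MSI-X vector number maps onto the mask/unmask/clear
 * registers used by the three channel helpers above. This only
 * illustrates the msix_id % 4 / msix_id >> 2 arithmetic visible in the
 * code, not the hardware bit ordering:
 *
 *	int msix_id = 9;
 *	int reg     = msix_id % 4;	// 1: second *_msix_*_vect register
 *	int bit     = msix_id >> 2;	// 2: bit selected via vxge_mBIT()
 *
 *	// vxge_hw_channel_msix_mask(channel, 9) therefore touches bit 2 of
 *	// set_msix_mask_vect[1]; unmask/clear write the matching bit in
 *	// clear_msix_mask_vect[1] / clr_msix_one_shot_vec[1].
 */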
338 * vxge_hw_device_set_intr_type - Updates the configuration
339 * with new interrupt type.
340 * @hldev: HW device handle.
341 * @intr_mode: New interrupt type
343 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
346 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
348 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
349 (intr_mode != VXGE_HW_INTR_MODE_DEF))
350 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
352 hldev->config.intr_mode = intr_mode;
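
/*
 * Minimal usage sketch for vxge_hw_device_set_intr_type(). The helper
 * only validates and records the mode in hldev->config.intr_mode; any
 * unrecognized value falls back to INTA. (The variable name below is
 * illustrative, not part of the driver.)
 *
 *	u32 mode;
 *
 *	mode = vxge_hw_device_set_intr_type(hldev,
 *					    VXGE_HW_INTR_MODE_MSIX_ONE_SHOT);
 *	// 'mode' holds the mode actually accepted; had an invalid value
 *	// been passed, it would read back as VXGE_HW_INTR_MODE_IRQLINE.
 */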
357 * vxge_hw_device_intr_enable - Enable interrupts.
358 * @hldev: HW device handle.
362 * Enable Titan interrupts. The function is to be executed the last in
363 * Titan initialization sequence.
365 * See also: vxge_hw_device_intr_disable()
367 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
373 vxge_hw_device_mask_all(hldev);
375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
377 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
380 vxge_hw_vpath_intr_enable(
381 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
384 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
385 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
386 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
389 writeq(val64, &hldev->common_reg->tim_int_status0);
391 writeq(~val64, &hldev->common_reg->tim_int_mask0);
394 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
395 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
398 __vxge_hw_pio_mem_write32_upper(val32,
399 &hldev->common_reg->tim_int_status1);
401 __vxge_hw_pio_mem_write32_upper(~val32,
402 &hldev->common_reg->tim_int_mask1);
406 val64 = readq(&hldev->common_reg->titan_general_int_status);
408 vxge_hw_device_unmask_all(hldev);
412 * vxge_hw_device_intr_disable - Disable Titan interrupts.
413 * @hldev: HW device handle.
417 * Disable Titan interrupts.
419 * See also: vxge_hw_device_intr_enable()
421 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
425 vxge_hw_device_mask_all(hldev);
427 /* mask all the tim interrupts */
428 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
429 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
430 &hldev->common_reg->tim_int_mask1);
432 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
434 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
437 vxge_hw_vpath_intr_disable(
438 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
443 * vxge_hw_device_mask_all - Mask all device interrupts.
444 * @hldev: HW device handle.
446 * Mask all device interrupts.
448 * See also: vxge_hw_device_unmask_all()
450 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
454 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
455 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
457 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
458 &hldev->common_reg->titan_mask_all_int);
462 * vxge_hw_device_unmask_all - Unmask all device interrupts.
463 * @hldev: HW device handle.
465 * Unmask all device interrupts.
467 * See also: vxge_hw_device_mask_all()
469 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
473 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
474 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
476 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
477 &hldev->common_reg->titan_mask_all_int);
481 * vxge_hw_device_flush_io - Flush io writes.
482 * @hldev: HW device handle.
484 * The function performs a read operation to flush io writes.
488 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
492 val32 = readl(&hldev->common_reg->titan_general_int_status);
496 * __vxge_hw_device_handle_error - Handle error
499 * @type: Error type. Please see enum vxge_hw_event{}
503 static enum vxge_hw_status
504 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
505 enum vxge_hw_event type)
508 case VXGE_HW_EVENT_UNKNOWN:
510 case VXGE_HW_EVENT_RESET_START:
511 case VXGE_HW_EVENT_RESET_COMPLETE:
512 case VXGE_HW_EVENT_LINK_DOWN:
513 case VXGE_HW_EVENT_LINK_UP:
515 case VXGE_HW_EVENT_ALARM_CLEARED:
517 case VXGE_HW_EVENT_ECCERR:
518 case VXGE_HW_EVENT_MRPCIM_ECCERR:
520 case VXGE_HW_EVENT_FIFO_ERR:
521 case VXGE_HW_EVENT_VPATH_ERR:
522 case VXGE_HW_EVENT_CRITICAL_ERR:
523 case VXGE_HW_EVENT_SERR:
525 case VXGE_HW_EVENT_SRPCIM_SERR:
526 case VXGE_HW_EVENT_MRPCIM_SERR:
528 case VXGE_HW_EVENT_SLOT_FREEZE:
536 if (hldev->uld_callbacks->crit_err)
537 hldev->uld_callbacks->crit_err(hldev,
545 * __vxge_hw_device_handle_link_down_ind
546 * @hldev: HW device handle.
548 * Link down indication handler. The function is invoked by HW when
549 * Titan indicates that the link is down.
551 static enum vxge_hw_status
552 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
555 * If the link state is already down, return.
557 if (hldev->link_state == VXGE_HW_LINK_DOWN)
560 hldev->link_state = VXGE_HW_LINK_DOWN;
563 if (hldev->uld_callbacks->link_down)
564 hldev->uld_callbacks->link_down(hldev);
570 * __vxge_hw_device_handle_link_up_ind
571 * @hldev: HW device handle.
573 * Link up indication handler. The function is invoked by HW when
574 * Titan indicates that the link is up for a programmable amount of time.
576 static enum vxge_hw_status
577 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
580 * If the link state is already up, return.
582 if (hldev->link_state == VXGE_HW_LINK_UP)
585 hldev->link_state = VXGE_HW_LINK_UP;
588 if (hldev->uld_callbacks->link_up)
589 hldev->uld_callbacks->link_up(hldev);
595 * __vxge_hw_vpath_alarm_process - Process Alarms.
596 * @vpath: Virtual Path.
597 * @skip_alarms: Do not clear the alarms
599 * Process vpath alarms.
602 static enum vxge_hw_status
603 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
609 struct __vxge_hw_device *hldev = NULL;
610 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
612 struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 struct vxge_hw_vpath_reg __iomem *vp_reg;
616 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
621 hldev = vpath->hldev;
622 vp_reg = vpath->vp_reg;
623 alarm_status = readq(&vp_reg->vpath_general_int_status);
625 if (alarm_status == VXGE_HW_ALL_FOXES) {
626 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
631 sw_stats = vpath->sw_stats;
633 if (alarm_status & ~(
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 sw_stats->error_stats.unknown_alarms++;
640 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
645 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
647 val64 = readq(&vp_reg->xgmac_vp_int_status);
650 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
652 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
655 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
659 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
661 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
663 sw_stats->error_stats.network_sustained_fault++;
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 &vp_reg->asic_ntwk_vp_err_mask);
669 __vxge_hw_device_handle_link_down_ind(hldev);
670 alarm_event = VXGE_HW_SET_LEVEL(
671 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
675 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
677 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
679 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
681 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
684 sw_stats->error_stats.network_sustained_ok++;
687 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 &vp_reg->asic_ntwk_vp_err_mask);
690 __vxge_hw_device_handle_link_up_ind(hldev);
691 alarm_event = VXGE_HW_SET_LEVEL(
692 VXGE_HW_EVENT_LINK_UP, alarm_event);
695 writeq(VXGE_HW_INTR_MASK_ALL,
696 &vp_reg->asic_ntwk_vp_err_reg);
698 alarm_event = VXGE_HW_SET_LEVEL(
699 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
706 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
708 pic_status = readq(&vp_reg->vpath_ppif_int_status);
711 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
713 val64 = readq(&vp_reg->general_errors_reg);
714 mask64 = readq(&vp_reg->general_errors_mask);
717 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
719 sw_stats->error_stats.ini_serr_det++;
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_SERR, alarm_event);
726 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
728 sw_stats->error_stats.dblgen_fifo0_overflow++;
730 alarm_event = VXGE_HW_SET_LEVEL(
731 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
735 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
737 sw_stats->error_stats.statsb_pif_chain_error++;
740 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
742 sw_stats->error_stats.statsb_drop_timeout++;
745 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
747 sw_stats->error_stats.target_illegal_access++;
750 writeq(VXGE_HW_INTR_MASK_ALL,
751 &vp_reg->general_errors_reg);
752 alarm_event = VXGE_HW_SET_LEVEL(
753 VXGE_HW_EVENT_ALARM_CLEARED,
759 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
761 val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
765 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
767 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
769 alarm_event = VXGE_HW_SET_LEVEL(
770 VXGE_HW_EVENT_FIFO_ERR,
775 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
777 sw_stats->error_stats.kdfcctl_fifo0_poison++;
779 alarm_event = VXGE_HW_SET_LEVEL(
780 VXGE_HW_EVENT_FIFO_ERR,
785 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
787 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
789 alarm_event = VXGE_HW_SET_LEVEL(
790 VXGE_HW_EVENT_FIFO_ERR,
795 writeq(VXGE_HW_INTR_MASK_ALL,
796 &vp_reg->kdfcctl_errors_reg);
797 alarm_event = VXGE_HW_SET_LEVEL(
798 VXGE_HW_EVENT_ALARM_CLEARED,
805 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
807 val64 = readq(&vp_reg->wrdma_alarm_status);
809 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
811 val64 = readq(&vp_reg->prc_alarm_reg);
812 mask64 = readq(&vp_reg->prc_alarm_mask);
814 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
816 sw_stats->error_stats.prc_ring_bumps++;
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
820 sw_stats->error_stats.prc_rxdcm_sc_err++;
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
829 sw_stats->error_stats.prc_rxdcm_sc_abort++;
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
836 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
838 sw_stats->error_stats.prc_quanta_size_err++;
840 alarm_event = VXGE_HW_SET_LEVEL(
841 VXGE_HW_EVENT_VPATH_ERR,
846 writeq(VXGE_HW_INTR_MASK_ALL,
847 &vp_reg->prc_alarm_reg);
848 alarm_event = VXGE_HW_SET_LEVEL(
849 VXGE_HW_EVENT_ALARM_CLEARED,
855 hldev->stats.sw_dev_err_stats.vpath_alarms++;
857 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
861 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
863 if (alarm_event == VXGE_HW_EVENT_SERR)
864 return VXGE_HW_ERR_CRITICAL;
866 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 VXGE_HW_ERR_SLOT_FREEZE :
868 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
873 * vxge_hw_device_begin_irq - Begin IRQ processing.
874 * @hldev: HW device handle.
875 * @skip_alarms: Do not clear the alarms
876 * @reason: "Reason" for the interrupt, the value of Titan's
877 * general_int_status register.
879 * The function performs two actions. It first checks whether (in the case of
880 * a shared IRQ) the interrupt was raised by the device. Next, it masks the device interrupts.
883 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
884 * bridge. Therefore, two back-to-back interrupts are potentially possible.
886 * Returns: 0, if the interrupt is not "ours" (note that in this case the
887 * device remains enabled).
888 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter status.
891 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 u32 skip_alarms, u64 *reason)
898 enum vxge_hw_status ret = VXGE_HW_OK;
900 val64 = readq(&hldev->common_reg->titan_general_int_status);
902 if (unlikely(!val64)) {
903 /* not Titan interrupt */
905 ret = VXGE_HW_ERR_WRONG_IRQ;
909 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
911 adapter_status = readq(&hldev->common_reg->adapter_status);
913 if (adapter_status == VXGE_HW_ALL_FOXES) {
915 __vxge_hw_device_handle_error(hldev,
916 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
918 ret = VXGE_HW_ERR_SLOT_FREEZE;
923 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
927 vpath_mask = hldev->vpaths_deployed >>
928 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
937 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
940 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
942 enum vxge_hw_status error_level = VXGE_HW_OK;
944 hldev->stats.sw_dev_err_stats.vpath_alarms++;
946 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
948 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
951 ret = __vxge_hw_vpath_alarm_process(
952 &hldev->virtual_paths[i], skip_alarms);
954 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
956 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
968 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
969 * condition that has caused the Tx and Rx interrupt.
972 * Acknowledge (that is, clear) the condition that has caused
973 * the Tx and Rx interrupt.
974 * See also: vxge_hw_device_begin_irq(),
975 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
977 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
980 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 &hldev->common_reg->tim_int_status0);
987 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 __vxge_hw_pio_mem_write32_upper(
990 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 &hldev->common_reg->tim_int_status1);
997 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
999 * @dtrh: Buffer to return the DTR pointer
1001 * Allocates a dtr from the reserve array. If the reserve array is empty,
1002 * it swaps the reserve and free arrays.
1005 static enum vxge_hw_status
1006 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1008 if (channel->reserve_ptr - channel->reserve_top > 0) {
1010 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1015 /* switch between empty and full arrays */
1017 /* The idea behind this design is that keeping the free and reserve
1018 * arrays separate also separates the irq and non-irq parts, i.e. no
1019 * additional locking is needed when we free a resource. */
1021 if (channel->length - channel->free_ptr > 0) {
1022 swap(channel->reserve_arr, channel->free_arr);
1023 channel->reserve_ptr = channel->length;
1024 channel->reserve_top = channel->free_ptr;
1025 channel->free_ptr = channel->length;
1027 channel->stats->reserve_free_swaps_cnt++;
1029 goto _alloc_after_swap;
1032 channel->stats->full_cnt++;
1035 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1039 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1040 * @channelh: Channel
1041 * @dtrh: DTR pointer
1043 * Posts a dtr to work array.
1047 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1049 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1051 channel->work_arr[channel->post_index++] = dtrh;
1054 if (channel->post_index == channel->length)
1055 channel->post_index = 0;
1059 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1061 * @dtr: Buffer to return the next completed DTR pointer
1063 * Returns the next completed dtr without removing it from the work array
1067 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1069 vxge_assert(channel->compl_index < channel->length);
1071 *dtrh = channel->work_arr[channel->compl_index];
1076 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1077 * @channel: Channel handle
1079 * Removes the next completed dtr from work array
1082 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1084 channel->work_arr[channel->compl_index] = NULL;
1087 if (++channel->compl_index == channel->length)
1088 channel->compl_index = 0;
1090 channel->stats->total_compl_cnt++;
1094 * vxge_hw_channel_dtr_free - Frees a dtr
1095 * @channel: Channel handle
1098 * Returns the dtr to free array
1101 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1103 channel->free_arr[--channel->free_ptr] = dtrh;
1107 * vxge_hw_channel_dtr_count
1108 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1110 * Retrieve the number of DTRs available. This function cannot be called
1111 * from the data path. ring_initial_replenish() is the only user.
1113 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1115 return (channel->reserve_ptr - channel->reserve_top) +
1116 (channel->length - channel->free_ptr);
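
/*
 * Worked example of the descriptor accounting used by
 * vxge_hw_channel_dtr_count() and the reserve/free swap above (the
 * numbers are illustrative only):
 *
 *	// length = 256, reserve_ptr = 40, reserve_top = 0, free_ptr = 200
 *	int avail = (40 - 0) + (256 - 200);	// 40 still reservable plus 56
 *						// freed but not yet swapped = 96
 *
 *	// Once reserve_ptr reaches reserve_top, vxge_hw_channel_dtr_alloc()
 *	// swaps reserve_arr/free_arr, making those 56 freed DTRs the new
 *	// reserve pool without any extra locking.
 */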
1120 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1121 * @ring: Handle to the ring object used for receive
1122 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1123 * with a valid handle.
1125 * Reserve an Rx descriptor for subsequent filling-in by the driver
1126 * and posting on the corresponding channel
1127 * via vxge_hw_ring_rxd_post().
1129 * Returns: VXGE_HW_OK - success.
1130 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1133 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1136 enum vxge_hw_status status;
1137 struct __vxge_hw_channel *channel;
1139 channel = &ring->channel;
1141 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1143 if (status == VXGE_HW_OK) {
1144 struct vxge_hw_ring_rxd_1 *rxdp =
1145 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1147 rxdp->control_0 = rxdp->control_1 = 0;
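
/*
 * Minimal Rx replenish sketch built from the ring APIs in this file.
 * This is only an illustration: error handling is omitted, 'dma_addr'
 * and 'buf_size' are caller-supplied values, and the buffer-programming
 * step assumes an rxd buffer helper such as vxge_hw_ring_rxd_1b_set()
 * from vxge-traffic.h.
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// point the descriptor at a freshly mapped receive buffer
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		// hand ownership of the descriptor to the adapter
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */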
1154 * vxge_hw_ring_rxd_free - Free descriptor.
1155 * @ring: Handle to the ring object used for receive
1156 * @rxdh: Descriptor handle.
1158 * Free the reserved descriptor. This operation is "symmetrical" to
1159 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's lifecycle.
1162 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can be:
1165 * - reserved (vxge_hw_ring_rxd_reserve);
1167 * - posted (vxge_hw_ring_rxd_post);
1169 * - completed (vxge_hw_ring_rxd_next_completed);
1171 * - and recycled again (vxge_hw_ring_rxd_free).
1173 * For alternative state transitions and more details please refer to the design doc.
1177 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1179 struct __vxge_hw_channel *channel;
1181 channel = &ring->channel;
1183 vxge_hw_channel_dtr_free(channel, rxdh);
1188 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1189 * @ring: Handle to the ring object used for receive
1190 * @rxdh: Descriptor handle.
1192 * This routine prepares an rxd and posts it to the channel's work array.
1194 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1196 struct __vxge_hw_channel *channel;
1198 channel = &ring->channel;
1200 vxge_hw_channel_dtr_post(channel, rxdh);
1204 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1205 * @ring: Handle to the ring object used for receive
1206 * @rxdh: Descriptor handle.
1208 * Processes rxd after post
1210 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1212 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1214 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1216 if (ring->stats->common_stats.usage_cnt > 0)
1217 ring->stats->common_stats.usage_cnt--;
1221 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1222 * @ring: Handle to the ring object used for receive
1223 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1225 * Post descriptor on the ring.
1226 * Prior to posting the descriptor should be filled in accordance with
1227 * Host/Titan interface specification for a given service (LL, etc.).
1230 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1232 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1233 struct __vxge_hw_channel *channel;
1235 channel = &ring->channel;
1238 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1240 vxge_hw_channel_dtr_post(channel, rxdh);
1242 if (ring->stats->common_stats.usage_cnt > 0)
1243 ring->stats->common_stats.usage_cnt--;
1247 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1248 * @ring: Handle to the ring object used for receive
1249 * @rxdh: Descriptor handle.
1251 * Processes rxd after post with memory barrier.
1253 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1256 vxge_hw_ring_rxd_post_post(ring, rxdh);
1260 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1261 * @ring: Handle to the ring object used for receive
1262 * @rxdh: Descriptor handle. Returned by HW.
1263 * @t_code: Transfer code, as per Titan User Guide,
1264 * Receive Descriptor Format. Returned by HW.
1266 * Retrieve the _next_ completed descriptor.
1267 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1268 * driver of new completed descriptors. After that
1269 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1270 * completions (the very first completion is passed by HW via
1271 * vxge_hw_ring_callback_f).
1273 * Implementation-wise, the driver is free to call
1274 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1275 * ring callback, or in a deferred fashion and separate (from HW)
1278 * Non-zero @t_code means failure to fill-in receive buffer(s)
1279 * of the descriptor.
1280 * For instance, parity error detected during the data transfer.
1281 * In this case Titan will complete the descriptor and indicate
1282 * to the host that the received data is not to be used.
1283 * For details please refer to Titan User Guide.
1285 * Returns: VXGE_HW_OK - success.
1286 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1287 * are currently available for processing.
1289 * See also: vxge_hw_ring_callback_f{},
1290 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1292 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1293 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1295 struct __vxge_hw_channel *channel;
1296 struct vxge_hw_ring_rxd_1 *rxdp;
1297 enum vxge_hw_status status = VXGE_HW_OK;
1300 channel = &ring->channel;
1302 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1306 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1310 control_0 = rxdp->control_0;
1311 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1312 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1314 /* check whether it is not the end */
1315 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1317 vxge_assert((rxdp)->host_control !=
1321 vxge_hw_channel_dtr_complete(channel);
1323 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1325 ring->stats->common_stats.usage_cnt++;
1326 if (ring->stats->common_stats.usage_max <
1327 ring->stats->common_stats.usage_cnt)
1328 ring->stats->common_stats.usage_max =
1329 ring->stats->common_stats.usage_cnt;
1331 status = VXGE_HW_OK;
1335 /* reset it. since we don't want to return
1336 * garbage to the driver */
1338 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1344 * vxge_hw_ring_handle_tcode - Handle transfer code.
1345 * @ring: Handle to the ring object used for receive
1346 * @rxdh: Descriptor handle.
1347 * @t_code: One of the enumerated (and documented in the Titan user guide)
1350 * Handle descriptor's transfer code. The latter comes with each completed descriptor.
1353 * Returns: one of the enum vxge_hw_status{} enumerated types.
1354 * VXGE_HW_OK - for success.
1355 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1357 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1358 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1360 enum vxge_hw_status status = VXGE_HW_OK;
1362 /* If the t_code is not supported and if the
1363 * t_code is other than 0x5 (unparseable packet
1364 * such as an unknown IPv6 header), drop it.
1367 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1368 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1369 status = VXGE_HW_OK;
1373 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1374 status = VXGE_HW_ERR_INVALID_TCODE;
1378 ring->stats->rxd_t_code_err_cnt[t_code]++;
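
/*
 * Sketch of the Rx completion side, pairing the three ring APIs above.
 * This is illustrative only; a real completion callback also unmaps the
 * buffer and passes the skb up the stack before re-posting the
 * descriptor.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_RING_T_CODE_OK)
 *			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *		// ... consume the received frame ...
 *		vxge_hw_ring_rxd_post_post_wmb(ring, rxdh);
 *	}
 */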
1384 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1387 * @txdl_ptr: The starting location of the TxDL in host memory
1388 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1389 * @no_snoop: No snoop flags
1391 * This function posts a non-offload doorbell to the doorbell FIFO
1394 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1395 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1397 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1398 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1399 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1400 &fifo->nofl_db->control_0);
1404 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1410 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in the fifo
1412 * @fifoh: Handle to the fifo object used for non offload send
1414 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1416 return vxge_hw_channel_dtr_count(&fifoh->channel);
1420 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1421 * @fifoh: Handle to the fifo object used for non offload send
1422 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1423 * with a valid handle.
1424 * @txdl_priv: Buffer to return the pointer to per txdl space
1426 * Reserve a single TxDL (that is, fifo descriptor)
1427 * for subsequent filling-in by the driver
1428 * and posting on the corresponding channel
1429 * via vxge_hw_fifo_txdl_post().
1431 * Note: it is the responsibility of driver to reserve multiple descriptors
1432 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1433 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1435 * Returns: VXGE_HW_OK - success;
1436 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1439 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1440 struct __vxge_hw_fifo *fifo,
1441 void **txdlh, void **txdl_priv)
1443 struct __vxge_hw_channel *channel;
1444 enum vxge_hw_status status;
1447 channel = &fifo->channel;
1449 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1451 if (status == VXGE_HW_OK) {
1452 struct vxge_hw_fifo_txd *txdp =
1453 (struct vxge_hw_fifo_txd *)*txdlh;
1454 struct __vxge_hw_fifo_txdl_priv *priv;
1456 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1458 /* reset the TxDL's private */
1459 priv->align_dma_offset = 0;
1460 priv->align_vaddr_start = priv->align_vaddr;
1461 priv->align_used_frags = 0;
1463 priv->alloc_frags = fifo->config->max_frags;
1464 priv->next_txdl_priv = NULL;
1466 *txdl_priv = (void *)(size_t)txdp->host_control;
1468 for (i = 0; i < fifo->config->max_frags; i++) {
1469 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1470 txdp->control_0 = txdp->control_1 = 0;
1478 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1480 * @fifo: Handle to the fifo object used for non offload send
1481 * @txdlh: Descriptor handle.
1482 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1484 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1485 * @size: Size of the data buffer (in bytes).
1487 * This API is part of the preparation of the transmit descriptor for posting
1488 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1489 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1490 * All three APIs fill in the fields of the fifo descriptor,
1491 * in accordance with the Titan specification.
1494 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1495 void *txdlh, u32 frag_idx,
1496 dma_addr_t dma_pointer, u32 size)
1498 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1499 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1501 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1502 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1505 txdp->control_0 = txdp->control_1 = 0;
1507 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1508 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1509 txdp->control_1 |= fifo->interrupt_type;
1510 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1512 if (txdl_priv->frags) {
1513 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1514 (txdl_priv->frags - 1);
1515 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1516 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1520 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1522 txdp->buffer_pointer = (u64)dma_pointer;
1523 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1524 fifo->stats->total_buffers++;
1529 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1530 * @fifo: Handle to the fifo object used for non offload send
1531 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1532 * @frags: Number of contiguous buffers that are part of a single
1533 * transmit operation.
1535 * Post descriptor on the 'fifo' type channel for transmission.
1536 * Prior to posting the descriptor should be filled in accordance with
1537 * Host/Titan interface specification for a given service (LL, etc.).
1540 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1542 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1543 struct vxge_hw_fifo_txd *txdp_last;
1544 struct vxge_hw_fifo_txd *txdp_first;
1546 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1549 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1550 txdp_last->control_0 |=
1551 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1552 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1554 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1556 __vxge_hw_non_offload_db_post(fifo,
1557 (u64)txdl_priv->dma_addr,
1558 txdl_priv->frags - 1,
1559 fifo->no_snoop_bits);
1561 fifo->stats->total_posts++;
1562 fifo->stats->common_stats.usage_cnt++;
1563 if (fifo->stats->common_stats.usage_max <
1564 fifo->stats->common_stats.usage_cnt)
1565 fifo->stats->common_stats.usage_max =
1566 fifo->stats->common_stats.usage_cnt;
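
/*
 * Minimal transmit sketch using the fifo APIs above. This is a sketch
 * only: skb mapping, per-fragment iteration and completion handling are
 * omitted, and 'dma_addr'/'len' are caller-provided values.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) == VXGE_HW_OK) {
 *		// one call per fragment; fragment 0 shown here
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *		// hands the TxDL to the adapter and rings the
 *		// non-offload doorbell
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */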
1570 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1571 * @fifo: Handle to the fifo object used for non offload send
1572 * @txdlh: Descriptor handle. Returned by HW.
1573 * @t_code: Transfer code, as per Titan User Guide,
1574 * Transmit Descriptor Format.
1577 * Retrieve the _next_ completed descriptor.
1578 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1579 * driver of new completed descriptors. After that
1580 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1581 * completions (the very first completion is passed by HW via
1582 * vxge_hw_channel_callback_f).
1584 * Implementation-wise, the driver is free to call
1585 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1586 * channel callback, or in a deferred fashion and separate (from HW)
1589 * Non-zero @t_code means failure to process the descriptor.
1590 * The failure could happen, for instance, when the link is
1591 * down, in which case Titan completes the descriptor because it
1592 * is not able to send the data out.
1594 * For details please refer to Titan User Guide.
1596 * Returns: VXGE_HW_OK - success.
1597 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1598 * are currently available for processing.
1601 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1602 struct __vxge_hw_fifo *fifo, void **txdlh,
1603 enum vxge_hw_fifo_tcode *t_code)
1605 struct __vxge_hw_channel *channel;
1606 struct vxge_hw_fifo_txd *txdp;
1607 enum vxge_hw_status status = VXGE_HW_OK;
1609 channel = &fifo->channel;
1611 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1615 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1619 /* check whether host owns it */
1620 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1622 vxge_assert(txdp->host_control != 0);
1624 vxge_hw_channel_dtr_complete(channel);
1626 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1628 if (fifo->stats->common_stats.usage_cnt > 0)
1629 fifo->stats->common_stats.usage_cnt--;
1631 status = VXGE_HW_OK;
1635 /* no more completions */
1637 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1643 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1644 * @fifo: Handle to the fifo object used for non offload send
1645 * @txdlh: Descriptor handle.
1646 * @t_code: One of the enumerated (and documented in the Titan user guide)
1649 * Handle descriptor's transfer code. The latter comes with each completed descriptor.
1652 * Returns: one of the enum vxge_hw_status{} enumerated types.
1653 * VXGE_HW_OK - for success.
1654 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1656 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1658 enum vxge_hw_fifo_tcode t_code)
1660 enum vxge_hw_status status = VXGE_HW_OK;
1662 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1663 status = VXGE_HW_ERR_INVALID_TCODE;
1667 fifo->stats->txd_t_code_err_cnt[t_code]++;
1673 * vxge_hw_fifo_txdl_free - Free descriptor.
1674 * @fifo: Handle to the fifo object used for non offload send
1675 * @txdlh: Descriptor handle.
1677 * Free the reserved descriptor. This operation is "symmetrical" to
1678 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's lifecycle.
1681 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can be:
1684 * - reserved (vxge_hw_fifo_txdl_reserve);
1686 * - posted (vxge_hw_fifo_txdl_post);
1688 * - completed (vxge_hw_fifo_txdl_next_completed);
1690 * - and recycled again (vxge_hw_fifo_txdl_free).
1692 * For alternative state transitions and more details please refer to the design doc.
1696 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1698 struct __vxge_hw_channel *channel;
1700 channel = &fifo->channel;
1702 vxge_hw_channel_dtr_free(channel, txdlh);
1706 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1707 * to MAC address table.
1708 * @vp: Vpath handle.
1709 * @macaddr: MAC address to be added for this vpath into the list
1710 * @macaddr_mask: MAC address mask for macaddr
1711 * @duplicate_mode: Duplicate MAC address add mode. Please see
1712 * enum vxge_hw_vpath_mac_addr_add_mode{}
1714 * Adds the given mac address and mac address mask into the list for this vpath.
1716 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1717 * vxge_hw_vpath_mac_addr_get_next
1721 vxge_hw_vpath_mac_addr_add(
1722 struct __vxge_hw_vpath_handle *vp,
1723 u8 (macaddr)[ETH_ALEN],
1724 u8 (macaddr_mask)[ETH_ALEN],
1725 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1730 enum vxge_hw_status status = VXGE_HW_OK;
1733 status = VXGE_HW_ERR_INVALID_HANDLE;
1737 for (i = 0; i < ETH_ALEN; i++) {
1739 data1 |= (u8)macaddr[i];
1742 data2 |= (u8)macaddr_mask[i];
1745 switch (duplicate_mode) {
1746 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1749 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1752 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1760 status = __vxge_hw_vpath_rts_table_set(vp,
1761 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1762 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1764 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1765 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1766 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
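
/*
 * Usage sketch for vxge_hw_vpath_mac_addr_add(). The values below are
 * illustrative; the meaning of individual mask bits is defined by the
 * Titan RTS DA table and is not asserted here.
 *
 *	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	u8 mask[ETH_ALEN] = { 0 };
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 *	// The six address bytes are shifted into a single 64-bit DATA0
 *	// value (and the mask bytes into DATA1) before being written to
 *	// the RTS access-steer registers.
 */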
1772 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1773 * from MAC address table.
1774 * @vp: Vpath handle.
1775 * @macaddr: First MAC address entry for this vpath in the list
1776 * @macaddr_mask: MAC address mask for macaddr
1778 * Returns the first mac address and mac address mask in the list for this vpath.
1780 * see also: vxge_hw_vpath_mac_addr_get_next
1784 vxge_hw_vpath_mac_addr_get(
1785 struct __vxge_hw_vpath_handle *vp,
1786 u8 (macaddr)[ETH_ALEN],
1787 u8 (macaddr_mask)[ETH_ALEN])
1792 enum vxge_hw_status status = VXGE_HW_OK;
1795 status = VXGE_HW_ERR_INVALID_HANDLE;
1799 status = __vxge_hw_vpath_rts_table_get(vp,
1800 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1801 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1804 if (status != VXGE_HW_OK)
1807 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1809 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1811 for (i = ETH_ALEN; i > 0; i--) {
1812 macaddr[i-1] = (u8)(data1 & 0xFF);
1815 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1823 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1825 * from MAC address table.
1826 * @vp: Vpath handle.
1827 * @macaddr: Next MAC address entry for this vpath in the list
1828 * @macaddr_mask: MAC address mask for macaddr
1830 * Returns the next mac address and mac address mask in the list for this vpath.
1832 * see also: vxge_hw_vpath_mac_addr_get
1836 vxge_hw_vpath_mac_addr_get_next(
1837 struct __vxge_hw_vpath_handle *vp,
1838 u8 (macaddr)[ETH_ALEN],
1839 u8 (macaddr_mask)[ETH_ALEN])
1844 enum vxge_hw_status status = VXGE_HW_OK;
1847 status = VXGE_HW_ERR_INVALID_HANDLE;
1851 status = __vxge_hw_vpath_rts_table_get(vp,
1852 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1853 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1856 if (status != VXGE_HW_OK)
1859 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1861 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1863 for (i = ETH_ALEN; i > 0; i--) {
1864 macaddr[i-1] = (u8)(data1 & 0xFF);
1867 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1876 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1877 * from the MAC address table.
1878 * @vp: Vpath handle.
1879 * @macaddr: MAC address to be added for this vpath into the list
1880 * @macaddr_mask: MAC address mask for macaddr
1882 * Deletes the given mac address and mac address mask from the list for this vpath.
1884 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1885 * vxge_hw_vpath_mac_addr_get_next
1889 vxge_hw_vpath_mac_addr_delete(
1890 struct __vxge_hw_vpath_handle *vp,
1891 u8 (macaddr)[ETH_ALEN],
1892 u8 (macaddr_mask)[ETH_ALEN])
1897 enum vxge_hw_status status = VXGE_HW_OK;
1900 status = VXGE_HW_ERR_INVALID_HANDLE;
1904 for (i = 0; i < ETH_ALEN; i++) {
1906 data1 |= (u8)macaddr[i];
1909 data2 |= (u8)macaddr_mask[i];
1912 status = __vxge_hw_vpath_rts_table_set(vp,
1913 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1914 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1916 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1917 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1923 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1925 * @vp: Vpath handle.
1926 * @vid: vlan id to be added for this vpath into the list
1928 * Adds the given vlan id into the list for this vpath.
1929 * see also: vxge_hw_vpath_vid_delete
1933 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1935 enum vxge_hw_status status = VXGE_HW_OK;
1938 status = VXGE_HW_ERR_INVALID_HANDLE;
1942 status = __vxge_hw_vpath_rts_table_set(vp,
1943 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1945 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1951 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1953 * @vp: Vpath handle.
1954 * @vid: vlan id to be deleted from the list for this vpath
1956 * Deletes the given vlan id from the list for this vpath.
1957 * see also: vxge_hw_vpath_vid_add
1961 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1963 enum vxge_hw_status status = VXGE_HW_OK;
1966 status = VXGE_HW_ERR_INVALID_HANDLE;
1970 status = __vxge_hw_vpath_rts_table_set(vp,
1971 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1972 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1973 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1979 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1980 * @vp: Vpath handle.
1982 * Enable promiscuous mode of Titan-e operation.
1984 * See also: vxge_hw_vpath_promisc_disable().
1986 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1987 struct __vxge_hw_vpath_handle *vp)
1990 struct __vxge_hw_virtualpath *vpath;
1991 enum vxge_hw_status status = VXGE_HW_OK;
1993 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1994 status = VXGE_HW_ERR_INVALID_HANDLE;
2000 /* Enable promiscuous mode for function 0 only */
2001 if (!(vpath->hldev->access_rights &
2002 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2005 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2007 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2009 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2010 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2011 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2012 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2014 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2021 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2022 * @vp: Vpath handle.
2024 * Disable promiscuous mode of Titan-e operation.
2026 * See also: vxge_hw_vpath_promisc_enable().
2028 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2029 struct __vxge_hw_vpath_handle *vp)
2032 struct __vxge_hw_virtualpath *vpath;
2033 enum vxge_hw_status status = VXGE_HW_OK;
2035 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2036 status = VXGE_HW_ERR_INVALID_HANDLE;
2042 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2044 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2046 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2047 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2048 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2050 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2057 * vxge_hw_vpath_bcast_enable - Enable broadcast
2058 * @vp: Vpath handle.
2060 * Enable receiving broadcasts.
2062 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2063 struct __vxge_hw_vpath_handle *vp)
2066 struct __vxge_hw_virtualpath *vpath;
2067 enum vxge_hw_status status = VXGE_HW_OK;
2069 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2070 status = VXGE_HW_ERR_INVALID_HANDLE;
2076 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2078 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2079 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2080 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2087 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2088 * @vp: Vpath handle.
2090 * Enable Titan-e multicast addresses.
2091 * Returns: VXGE_HW_OK on success.
2094 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2095 struct __vxge_hw_vpath_handle *vp)
2098 struct __vxge_hw_virtualpath *vpath;
2099 enum vxge_hw_status status = VXGE_HW_OK;
2101 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2102 status = VXGE_HW_ERR_INVALID_HANDLE;
2108 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2110 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2111 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2112 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2119 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2120 * @vp: Vpath handle.
2122 * Disable Titan-e multicast addresses.
2123 * Returns: VXGE_HW_OK - success.
2124 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2128 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2131 struct __vxge_hw_virtualpath *vpath;
2132 enum vxge_hw_status status = VXGE_HW_OK;
2134 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2135 status = VXGE_HW_ERR_INVALID_HANDLE;
2141 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2143 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2144 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2145 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2152 * vxge_hw_vpath_alarm_process - Process Alarms.
2153 * @vpath: Virtual Path.
2154 * @skip_alarms: Do not clear the alarms
2156 * Process vpath alarms.
2159 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2160 struct __vxge_hw_vpath_handle *vp,
2163 enum vxge_hw_status status = VXGE_HW_OK;
2166 status = VXGE_HW_ERR_INVALID_HANDLE;
2170 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2176 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2178 * @vp: Virtual Path handle.
2179 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2180 * interrupts (can be repeated). If the fifo or ring is not enabled,
2181 * the MSIX vector for it should be set to 0
2182 * @alarm_msix_id: MSIX vector for alarm.
2184 * This API associates the given MSIX vector numbers with the four TIM
2185 * interrupts and the alarm interrupt.
2188 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2192 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2193 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2194 u32 vp_id = vp->vpath->vp_id;
2196 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2197 (vp_id * 4) + tim_msix_id[0]) |
2198 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2199 (vp_id * 4) + tim_msix_id[1]);
2201 writeq(val64, &vp_reg->interrupt_cfg0);
2203 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2204 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2205 &vp_reg->interrupt_cfg2);
2207 if (vpath->hldev->config.intr_mode ==
2208 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2209 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2210 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2211 0, 32), &vp_reg->one_shot_vect0_en);
2212 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2213 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2214 0, 32), &vp_reg->one_shot_vect1_en);
2215 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2216 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2217 0, 32), &vp_reg->one_shot_vect2_en);
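
/*
 * Worked example of the vector numbering programmed by
 * vxge_hw_vpath_msix_set() (the numbers are illustrative):
 *
 *	int tim_msix_id[4] = { 0, 1, 0, 0 };	// Tx uses slot 0, Rx slot 1
 *	int alarm_msix_id  = 2;
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
 *
 *	// For vp_id == 1 the TIM Tx/Rx interrupts are mapped to MSI-X
 *	// vectors 1*4 + 0 = 4 and 1*4 + 1 = 5, while the alarm is mapped
 *	// to first_vp_id*4 + 2, i.e. every vpath shares the alarm vector
 *	// derived from the device's first vpath.
 */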
2222 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2223 * @vp: Virtual Path handle.
2226 * The function masks the msix interrupt for the given msix_id
2229 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2234 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2236 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2237 __vxge_hw_pio_mem_write32_upper(
2238 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2239 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2243 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2244 * @vp: Virtual Path handle.
2247 * The function clears the msix interrupt for the given msix_id
2250 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2254 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2256 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2258 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2259 __vxge_hw_pio_mem_write32_upper(
2260 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2261 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2263 __vxge_hw_pio_mem_write32_upper(
2264 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2265 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2269 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2270 * @vp: Virtual Path handle.
2273 * The function unmasks the msix interrupt for the given msix_id
2276 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2281 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2283 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2284 __vxge_hw_pio_mem_write32_upper(
2285 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2286 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2290 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2291 * @vp: Virtual Path handle.
2293 * Mask Tx and Rx vpath interrupts.
2295 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2297 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2299 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2300 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2302 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2304 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2305 tim_int_mask1, vp->vpath->vp_id);
2307 val64 = readq(&hldev->common_reg->tim_int_mask0);
2309 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2310 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2311 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2312 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2313 &hldev->common_reg->tim_int_mask0);
2316 val64 = readl(&hldev->common_reg->tim_int_mask1);
2318 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2319 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2320 __vxge_hw_pio_mem_write32_upper(
2321 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2322 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2323 &hldev->common_reg->tim_int_mask1);
2328 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2329 * @vp: Virtual Path handle.
2331 * Unmask Tx and Rx vpath interrupts.
2333 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2335 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2337 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2338 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2340 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2342 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2343 tim_int_mask1, vp->vpath->vp_id);
2345 val64 = readq(&hldev->common_reg->tim_int_mask0);
2347 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2348 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2349 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2350 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2351 &hldev->common_reg->tim_int_mask0);
2354 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2355 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2356 __vxge_hw_pio_mem_write32_upper(
2357 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2358 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2359 &hldev->common_reg->tim_int_mask1);
2364 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2365 * descriptors and process the same.
2366 * @ring: Handle to the ring object used for receive
2368 * The function polls the Rx for the completed descriptors and calls
2369 * the driver via supplied completion callback.
2371 * Returns: VXGE_HW_OK, if the polling completed successfully.
2372 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2373 * descriptors available which are yet to be processed.
2375 * See also: vxge_hw_vpath_poll_tx()
2377 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2380 enum vxge_hw_status status = VXGE_HW_OK;
2387 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2388 if (status == VXGE_HW_OK)
2389 ring->callback(ring, first_rxdh,
2390 t_code, ring->channel.userdata);
2392 if (ring->cmpl_cnt != 0) {
2393 ring->doorbell_cnt += ring->cmpl_cnt;
2394 if (ring->doorbell_cnt >= ring->rxds_limit) {
2396 * Each RxD is of 4 qwords, update the number of
2397 * qwords replenished
2399 new_count = (ring->doorbell_cnt * 4);
2401 /* For each block add 4 more qwords */
2402 ring->total_db_cnt += ring->doorbell_cnt;
2403 if (ring->total_db_cnt >= ring->rxds_per_block) {
2405 /* Reset total count */
2406 ring->total_db_cnt %= ring->rxds_per_block;
2408 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2409 &ring->vp_reg->prc_rxd_doorbell);
2411 readl(&ring->common_reg->titan_general_int_status);
2412 ring->doorbell_cnt = 0;
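
/*
 * Worked example of the doorbell arithmetic above (illustrative: with
 * rxds_limit == 16 and 16 descriptors completed, and given that one
 * RxD occupies 4 qwords):
 *
 *	u32 new_count = 16 * 4;		// 64 qwords to report
 *
 *	// total_db_cnt tracks how many RxDs have been replenished since
 *	// the last whole-block boundary; per the comment above, each
 *	// completed block accounts 4 extra qwords before new_count is
 *	// written to prc_rxd_doorbell and doorbell_cnt is reset.
 */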
2420 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2422 * @fifo: Handle to the fifo object used for non offload send
2424 * The function polls the Tx for the completed descriptors and calls
2425 * the driver via supplied completion callback.
2427 * Returns: VXGE_HW_OK, if the polling completed successfully.
2428 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2429 * descriptors available which are yet to be processed.
2431 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2432 struct sk_buff ***skb_ptr, int nr_skb,
2435 enum vxge_hw_fifo_tcode t_code;
2437 enum vxge_hw_status status = VXGE_HW_OK;
2438 struct __vxge_hw_channel *channel;
2440 channel = &fifo->channel;
2442 status = vxge_hw_fifo_txdl_next_completed(fifo,
2443 &first_txdlh, &t_code);
2444 if (status == VXGE_HW_OK)
2445 if (fifo->callback(fifo, first_txdlh, t_code,
2446 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2447 status = VXGE_HW_COMPLETIONS_REMAIN;