mmiowb() is now implied by spin_unlock() on architectures that require
it, so there is no reason to call it from driver code. This patch was
generated using coccinelle:
@mmiowb@
@@
- mmiowb();
and invoked as:
$ for d in drivers include/linux/qed sound; do \
spatch --include-headers --sp-file mmiowb.cocci --dir $d --in-place; done
NOTE: mmiowb() has only ever guaranteed ordering in conjunction with
spin_unlock(). However, pairing each mmiowb() removal in this patch with
the corresponding call to spin_unlock() is not at all trivial, so there
is a small chance that this change may regress any drivers incorrectly
relying on mmiowb() to order MMIO writes between CPUs using lock-free
synchronisation. If you've ended up bisecting to this commit, you can
reintroduce the mmiowb() calls using wmb() instead, which should restore
the old behaviour on all architectures other than some esoteric ia64
systems.
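For reference, a minimal sketch of the two patterns described above (hypothetical driver fields dev->lock, dev->regs and register offset DOORBELL, for illustration only; not taken from any file in this patch):

spin_lock_irqsave(&dev->lock, flags);
writel(val, dev->regs + DOORBELL); /* posted MMIO write */
/* mmiowb() used to go here; spin_unlock() now implies it */
spin_unlock_irqrestore(&dev->lock, flags);

/* Lock-free code that really relied on mmiowb() can substitute wmb(): */
writel(val, dev->regs + DOORBELL);
wmb(); /* restores the old ordering except on some esoteric ia64 systems */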
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
123 files changed:
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
- /* orders the doorbell rings */
- mmiowb();
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
* MSI-X interrupt generates if Completion count > Threshold
*/
writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
- /* order the writes */
- mmiowb();
if (atomic_read(&cmdq->backlog_count))
schedule_work(&cmdq->backlog_qflush);
channel_writel(dc, SAIR, 0);
channel_writel(dc, DAIR, 0);
channel_writel(dc, CCR, 0);
}
/* Called with dc->lock held and bh disabled */
dma_sync_single_for_device(chan2parent(&dc->chan),
prev->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
channel_read_CHAR(dc) == prev->txd.phys)
/* Restart chain DMA */
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
dma_writel(ddev, MCR, 0);
}
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
ohci->mc_channels = channels;
}
I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
/* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
ndelay(270);
tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
/* Soft Reset */
tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
/* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
ndelay(450);
tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
struct hfi1_devdata *dd = rcd->dd;
u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
write_csr(dd, addr, rcd->imask);
/* force the above write on the chip and get a value back */
(void)read_csr(dd, addr);
<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
}
reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
<< RCV_HDR_HEAD_HEAD_SHIFT);
write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
sc_del_credit_return_intr(sc);
trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
if (needint) {
sc_return_credits(sc);
}
}
writel_relaxed(qp->doorbell_qpn,
to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
-
stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
qp->sq_next_wqe = ind;
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
bf->offset ^= bf->buf_size;
}
err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
op_modifier, op, token, event);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
mutex_unlock(&dev->cmd.hcr_mutex);
return err;
}
mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
dev->kar + MTHCA_CQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of CQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
(qp->qpn << 8) | size0,
dev->kar + MTHCA_SEND_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
qp->rq.next_ind = ind;
qp->rq.head += nreq;
- /*
- * Make sure doorbells don't leak out of RQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->rq.lock, flags);
return err;
}
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->sq.lock, flags);
return err;
}
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SRQ spinlock and
- * reach the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&srq->lock, flags);
return err;
}
cq->db.data.agg_flags = flags;
cq->db.data.value = cpu_to_le32(cons);
writeq(cq->db.raw, cq->db_addr);
-
- /* Make sure write would stick */
- mmiowb();
}
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if (rdma_protocol_roce(&dev->ibdev, 1)) {
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write takes effect */
- mmiowb();
}
break;
case QED_ROCE_QP_STATE_ERR:
smp_wmb();
writel(qp->sq.db_data.raw, qp->sq.db);
- /* Make sure write sticks */
- mmiowb();
-
spin_unlock_irqrestore(&qp->q_lock, flags);
return rc;
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write sticks */
- mmiowb();
-
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
writel(pa, tidp32);
qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
spin_unlock_irqrestore(tidlockp, flags);
}
pa |= 2 << 29;
}
writel(pa, tidp32);
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
pa = chippa;
}
writeq(pa, tidptr);
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
pa = chippa;
}
writeq(pa, tidptr);
adjust_rcv_timeout(rcd, npkts);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
didx = idx + min_idx;
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
u32 tmp = index;
iowrite32((tmp << 17) | IIC_READ, addr + IIC_CSR2);
udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
FLD_DN_ODD | FLD_DN_EVEN |
CAP_CONT_EVEN | CAP_CONT_ODD,
ipd->regs + CSR1);
iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE);
iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE);
}
/* enable interrupts, clear all irq flags */
/* resetting the adapter */
iowrite32(ADDR_ERR_ODD | ADDR_ERR_EVEN | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1);
msleep(20);
/* initializing adapter registers */
iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
iowrite32(0, pd->regs + MASK_LENGTH);
iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
/* verifying that we have a DT3155 board (not just a SAA7116 chip) */
read_i2c_reg(pd->regs, DT_ID, &tmp);
writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET_REQ
writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET
writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
return 0;
tasklet_kill(&host->notify);
writel(0, host->addr + INT_SIGNAL_ENABLE);
writel(0, host->addr + INT_STATUS_ENABLE);
dev_dbg(&jm->pdev->dev, "interrupts off\n");
spin_lock_irqsave(&host->lock, flags);
if (host->req) {
/* Reset to power-on state */
writel(0, &idd->idd_misc_regs->int_out.raw);
/* Set up square wave */
int_out.raw = 0;
int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
int_out.fields.diag = 0;
writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
/* Check square wave period averaged over some number of cycles */
start = ktime_get_ns();
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
-
- /* complete this write before we set host ready on another CPU */
- mmiowb();
fm->eject = tifm_7xx1_dummy_eject;
fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
free_irq(dev->irq, fm);
tifm_remove_adapter(fm);
alcor_request_complete(host, 0);
}
mutex_unlock(&host->cmd_mutex);
}
sdhci_send_command(host, mrq->cmd);
}
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);
*/
if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
host->mrqs_done[i] = NULL;
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
sdhci_finish_mrq(host, host->cmd->mrq);
}
spin_unlock_irqrestore(&host->lock, flags);
}
spin_unlock_irqrestore(&host->lock, flags);
}
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
}
if (host->irq_wake_enabled) {
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
ret = mmc_add_host(mmc);
if (ret)
goto unled;
struct tifm_dev *sock = host->dev;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
host->clk_div = 61;
host->clk_freq = 20000000;
writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
| TIFM_MMCSD_ERRMASK,
sock->addr + SOCK_MMCSD_INT_ENABLE);
spin_lock_irqsave(&sock->lock, flags);
host->eject = 1;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
spin_unlock_irqrestore(&sock->lock, flags);
tasklet_kill(&host->finish_tasklet);
via_sdc_send_command(host, mrq->cmd);
}
spin_unlock_irqrestore(&host->lock, flags);
}
gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
spin_unlock_irqrestore(&host->lock, flags);
via_pwron_sleep(host);
if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode != MMC_POWER_OFF)
via_restore_pcictrlreg(host);
via_restore_sdcreg(host);
spin_unlock_irqrestore(&host->lock, flags);
}
out:
spin_unlock(&sdhost->lock);
spin_unlock_irqrestore(&sdhost->lock, flags);
}
tasklet_schedule(&host->finish_tasklet);
}
spin_unlock_irqrestore(&host->lock, flags);
via_reset_pcictrl(host);
spin_lock_irqsave(&host->lock, flags);
}
spin_unlock_irqrestore(&host->lock, flags);
via_print_pcictrl(host);
/* Disable generating further interrupts */
writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
if (sdhost->mrq) {
pr_err("%s: Controller removed during "
/* make sure all DMA is stopped */
writel(VIA_CRDR_DMACTRL_SFTRST,
sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
int address, uint8_t value)
{
writeb(value, dev->mmio + address);
int address, uint32_t value)
{
writel(cpu_to_le32(value), dev->mmio + address);
}
/* returns pointer to our private structure */
if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
}
}
static int txx9ndfmc_dev_ready(struct nand_chip *chip)
napi_schedule(&greth->napi);
}
spin_unlock(&greth->devlock);
return retval;
if (sdev->promisc != set_promisc) {
sdev->promisc = set_promisc;
slic_configure_rcv(sdev);
- /* make sure writes to receiver cant leak out of the lock */
- mmiowb();
}
spin_unlock_bh(&sdev->link_lock);
}
if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
netif_stop_queue(dev);
- /* make sure writes to io-memory cant leak out of tx queue lock */
- mmiowb();
return NETDEV_TX_OK;
drop_skb:
mb();
writel_relaxed((u32)aenq->head,
dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
atl1_tx_map(adapter, skb, ptpd);
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
(adapter->txd_write_ptr >> 2));
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
BNX2_WR16(bp, txr->tx_bidx_addr, prod);
BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
txr->tx_prod = prod;
if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
txdata->tx_bd_prod += nbd;
if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
((u32 *)&rx_prods)[i]);
DP(NETIF_MSG_RX_STATUS,
"queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
/* Make sure that ACK is written */
REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */
wmb();
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
"write %x to HC %d (addr 0x%x)\n",
val, port, addr);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, addr, val);
if (REG_RD(bp, addr) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
/*
* Ensure that HC_CONFIG is written before leading/trailing edge config
*/
barrier();
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
}
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
{
/* No memory barriers */
storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
}
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
}
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
/* flush all before enabling interrupts */
mb();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
barrier();
/* wait for clean up to finish */
barrier();
/* wait for clean up to finish */
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
}
#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
reset_mask1 & (~not_reset_mask1));
barrier();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
reset_mask2 & (~stay_reset2));
barrier();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
REG_WR(bp, MISC_REG_UNPREPARED, 0);
barrier();
- /* Make sure all is written to the chip before the reset */
- mmiowb();
-
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
barrier();
/* Start accepting on iSCSI L2 ring */
if (!bnx2x_wait_sp_comp(bp, sp_bits))
BNX2X_ERR("rx_mode completion timed out!\n");
barrier();
/* Unset iSCSI L2 MAC */
/* As no ramrod is sent, complete the command immediately */
o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr_data);
REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr_data);
REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
/* Trigger the PF FW */
writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
/* Trigger the PF FW */
writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
/* Wait for PF to complete */
while ((tout >= 0) && (!*done)) {
msleep(interval);
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
*/
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
/* Firmware ack should be written before unlocking channel */
bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (skb->xmit_more && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
&dim_sample);
net_dim(&cpr->dim, dim_sample);
}
struct tg3 *tp = tnapi->tp;
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* When doing tagged status, this work check is unnecessary.
* The last_tag we write above tells the chip which piece of
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
tpr->rx_jmb_prod_idx);
}
} else if (work_mask) {
/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
* updated before the producer indices can be updated.
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
dpr->rx_jmb_prod_idx);
if (err)
tw32_f(HOSTCC_MODE, tp->coal_now);
}
HOSTCC_MODE_ENABLE |
tnapi->coal_now);
}
if (!skb->xmit_more || netif_xmit_stopped(txq)) {
/* Packets are ready, update Tx producer idx on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
- /* make sure that the reset is written before starting timer */
- mmiowb();
-
/* Wait for 10ms as Octeon resets. */
mdelay(100);
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
-
- /* make sure interrupts are really disabled */
- mmiowb();
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
value &= ~(1 << oq_no);
octeon_write_csr(oct, reg, value);
- /* Ensure that the enable register is written.
- */
- mmiowb();
-
spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
}
}
iq->pkt_in_done -= iq->pkts_processed;
iq->pkts_processed = 0;
/* this write needs to be flushed before we release the lock */
spin_unlock_bh(&iq->lock);
oct = iq->oct_dev;
}
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
reschedule = 0;
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
}
}
} /* for (each packet)... */
if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
writel(iq->fill_cnt, iq->doorbell_reg);
/* make sure doorbell write goes through */
iq->fill_cnt = 0;
iq->last_db_time = jiffies;
return;
if (!skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
- /* we need this if more than one processor can write to
- * our tail at a time, it synchronizes IO on IA64/Altix
- * systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0;
ew32(TDT(0), tx_ring->next_to_use);
usleep_range(200, 250);
}
tx_ring->next_to_use);
else
writel(tx_ring->next_to_use, tx_ring->tail);
-
- /* we need this if more than one processor can write
- * to our tail at a time, it synchronizes IO on
- *IA64/Altix systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
err_mask |= PCI_ERR_UNC_COMP_ABORT;
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
}
int fm10k_iov_resume(struct pci_dev *pdev)
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
/* Make sure write' to descriptors are complete before we tell hardware */
wmb();
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
- /* Synchronize I/O on since next processor may write to tail */
- mmiowb();
/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
}
/* Clean out receive buffer area, assumes receiver hardware stopped */
comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
__raw_writel((__force u32)cpu_to_be32(comm_flags),
(__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
while (time_before(jiffies, end)) {
val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
__raw_writel((__force u32) cpu_to_be32(val),
&priv->mfunc.comm->slave_write);
mutex_unlock(&dev->persist->device_state_mutex);
return 0;
}
(op_modifier << HCR_OPMOD_SHIFT) |
op), hcr + 6);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
cmd->toggle = cmd->toggle ^ 1;
ret = 0;
}
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
&priv->mfunc.comm[i].slave_write);
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_read);
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
struct mlx4_vport_state *admin_vport;
struct mlx4_vport_state *oper_vport;
slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
__raw_writel((__force u32)cpu_to_be32(slave_read),
&priv->mfunc.comm[slave].slave_read);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
/* if not in polling don't use ent after this point */
if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
mb();
}
__netif_tx_unlock(dev_queue);
}
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
mb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
writeq(val64, &tx_fifo->List_Control);
put_off++;
if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
put_off = 0;
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
}
/* We are copying and returning the local variable, in case if after
vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
fifo->tx_vector_no);
*/
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
msix_id);
continue;
}
vxge_debug_intr(VXGE_ERR,
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
&fifo->nofl_db->control_0);
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
- /* Make certain HW write took affect */
- mmiowb();
-
index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
if (p_sb_desc->index != index) {
p_sb_desc->index = index;
rc = QED_SB_ATT_IDX;
}
- /* Make certain we got a consistent view with HW */
- mmiowb();
-
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
- /* Flush the writes to IGU */
- mmiowb();
-
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
- /* Flush the write to IGU */
- mmiowb();
-
/* calculate where to read the status bit from */
sb_bit = 1 << (igu_sb_id % 32);
sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
barrier();
writel(txq->tx_db.raw, txq->doorbell_addr);
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
-
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
if (qede_txq_has_work(txq))
break;
internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
(u32 *)&rx_prods);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the napi lock is released and another qede_poll is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
}
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
wmb();
writel_relaxed(qdev->small_buf_q_producer_index,
&port_regs->CommonRegs.rxSmallQProducerIndex);
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
wmb();
ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
spin_unlock(&priv->lock);
return result;
}
spin_unlock(&priv->lock);
return result;
}
spin_unlock(&priv->lock);
return result;
}
spin_unlock(&priv->lock);
return result;
}
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
return result;
}
spin_unlock(&priv->lock);
return result;
}
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
spin_unlock_irqrestore(&priv->lock, flags);
}
}
ravb_write(ndev, mask, RIE0);
ravb_write(ndev, mask, TIE);
}
ravb_write(ndev, mask, RIE0);
ravb_write(ndev, mask, TIE);
}
spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */
spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */
if (priv->no_avb_link && phydev->link)
ravb_rcv_snd_enable(ndev);
if (priv->no_avb_link && phydev->link)
ravb_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&priv->lock, flags);
if (new_state && netif_msg_link(priv))
netif_stop_subqueue(ndev, q);
exit:
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
spin_lock_irqsave(&priv->lock, flags);
ravb_modify(ndev, ECMR, ECMR_PRM,
ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
spin_unlock_irqrestore(&priv->lock, flags);
}
ravb_write(ndev, GIE_PTCS, GIE);
else
ravb_write(ndev, GID_PTCD, GID);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
else
ravb_write(ndev, GID_PTMD0, GID);
}
spin_unlock_irqrestore(&priv->lock, flags);
return error;
spin_lock_irqsave(&priv->lock, flags);
ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
spin_unlock_irqrestore(&priv->lock, flags);
priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
sh_eth_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
if (new_state && netif_msg_link(mdp))
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* stop interrupts */
iowrite32(0, port_base + IntrMask);
_sc92031_dummy_read(port_base);
/* wait for any concurrent interrupt/tasklet to finish */
synchronize_irq(priv->pdev->irq);
wmb();
iowrite32(IntrBits, port_base + IntrMask);
}
static void _sc92031_disable_tx_rx(struct net_device *dev)
rmb();
iowrite32(intr_mask, port_base + IntrMask);
spin_unlock(&priv->lock);
}
rmb();
iowrite32(intr_mask, port_base + IntrMask);
iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
port_base + TxAddr0 + entry * 4);
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
spin_unlock_bh(&priv->lock);
_sc92031_set_mar(dev);
_sc92031_set_rx_config(dev);
spin_unlock_bh(&priv->lock);
}
priv->tx_timeouts++;
_sc92031_reset(dev);
spin_unlock(&priv->lock);
output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
_sc92031_mii_scan(port_base);
spin_unlock_bh(&priv->lock);
priv->pm_config = pm_config;
iowrite32(pm_config, port_base + PMConfig);
spin_unlock_bh(&priv->lock);
out:
_sc92031_mii_scan(port_base);
spin_unlock_bh(&priv->lock);
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
spin_unlock_bh(&priv->lock);
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
if (rp->quirks & rqStatusWBRace)
iowrite8(mask >> 16, ioaddr + IntrStatus2);
iowrite16(mask, ioaddr + IntrStatus);
if (work_done < budget) {
napi_complete_done(napi, work_done);
iowrite16(enable_mask, ioaddr + IntrEnable);
static void rhine_irq_disable(struct rhine_private *rp)
{
iowrite16(0x0000, rp->base + IntrEnable);
}
/* The interrupt handler does all of the Rx thread work and cleans up
static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
{
__w5100_write_direct(ndev, addr, data);
{
__w5100_write_direct(ndev, addr, data >> 8);
__w5100_write_direct(ndev, addr + 1, data);
for (i = 0; i < len; i++, addr++)
__w5100_write_direct(ndev, addr, *buf++);
for (i = 0; i < len; i++)
*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
for (i = 0; i < len; i++)
__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
data = w5300_read_direct(priv, W5300_IDM_DR);
spin_unlock_irqrestore(&priv->reg_lock, flags);
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
w5300_write_direct(priv, W5300_IDM_DR, data);
spin_unlock_irqrestore(&priv->reg_lock, flags);
}
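The W5300 hunks above share one idiom: an indirect address register and data register pair behind reg_lock, so the two MMIO accesses can never interleave between CPUs. A self-contained sketch of that idiom (offsets and names are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/io.h>

/* Illustrative indirect-mode accessor, modelled on the hunks above. */
struct demo_chip {
	spinlock_t reg_lock;
	void __iomem *base;
};

#define DEMO_IDM_AR	0x02	/* indirect address register (example offset) */
#define DEMO_IDM_DR	0x04	/* indirect data register (example offset) */

static u16 demo_read_indirect(struct demo_chip *chip, u16 addr)
{
	unsigned long flags;
	u16 data;

	/* The AR/DR pair is a single shared window, so it must be locked. */
	spin_lock_irqsave(&chip->reg_lock, flags);
	writew(addr, chip->base + DEMO_IDM_AR);
	data = readw(chip->base + DEMO_IDM_DR);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return data;
}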
unsigned long timeout = jiffies + msecs_to_jiffies(100);
w5300_write(priv, W5300_S0_CR, cmd);
while (w5300_read(priv, W5300_S0_CR) != 0) {
if (time_after(jiffies, timeout))
w5300_write(priv, W5300_SHARH,
ndev->dev_addr[4] << 8 |
ndev->dev_addr[5]);
}
static void w5300_hw_reset(struct w5300_priv *priv)
{
w5300_write_direct(priv, W5300_MR, MR_RST);
mdelay(5);
w5300_write_direct(priv, W5300_MR, priv->indirect ?
MR_WDF(7) | MR_PB | MR_IND :
MR_WDF(7) | MR_PB);
w5300_write(priv, W5300_IMR, 0);
w5300_write_macaddr(priv);
w5300_write32(priv, W5300_TMSRL, 64 << 24);
w5300_write32(priv, W5300_TMSRH, 0);
w5300_write(priv, W5300_MTYPE, 0x00ff);
}
static void w5300_hw_start(struct w5300_priv *priv)
{
w5300_write(priv, W5300_S0_MR, priv->promisc ?
S0_MR_MACRAW : S0_MR_MACRAW_MF);
w5300_command(priv, S0_CR_OPEN);
w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
w5300_write(priv, W5300_IMR, IR_S0);
}
static void w5300_hw_close(struct w5300_priv *priv)
{
w5300_write(priv, W5300_IMR, 0);
w5300_command(priv, S0_CR_CLOSE);
}
netif_stop_queue(ndev);
w5300_write_frame(priv, skb->data, skb->len);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
if (rx_count < budget) {
napi_complete_done(napi, rx_count);
w5300_write(priv, W5300_IMR, IR_S0);
if (!ir)
return IRQ_NONE;
w5300_write(priv, W5300_S0_IR, ir);
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
if (ir & S0_IR_RECV) {
if (napi_schedule_prep(&priv->napi)) {
w5300_write(priv, W5300_IMR, 0);
__napi_schedule(&priv->napi);
}
}
txq->link = &ds->ds_link;
ath5k_hw_start_tx_dma(ah, txq->qnum);
spin_unlock_bh(&txq->lock);
return 0;
}
ath5k_hw_set_imr(ah, ah->imask);
spin_unlock_bh(&ah->block);
}
mutex_unlock(&ah->lock);
set_bit(ATH_STAT_STARTED, ah->status);
"putting device to sleep\n");
}
"putting device to sleep\n");
}
mutex_unlock(&ah->lock);
ath5k_stop_tasklets(ah);
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = 0;
ath5k_hw_set_bssid(ah);
}
if (changes & BSS_CHANGED_BEACON_INT)
mutex_unlock(&ah->lock);
return ret;
}
val = swab32(val);
b43_write32(dev, B43_MMIO_RAM_CONTROL, offset);
b43_write32(dev, B43_MMIO_RAM_DATA, val);
}
/* The hardware guarantees us an atomic write, if we
* write the low register first. */
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low);
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high);
}
void b43_tsf_write(struct b43_wldev *dev, u64 tsf)
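The comment kept above states the hardware contract for the 64-bit TSF update: it is atomic only if the low word is written first. A sketch of that ordering with made-up register offsets:

#include <linux/kernel.h>
#include <linux/io.h>

#define DEMO_TSF_LOW	0x604	/* illustrative offsets, not b43's */
#define DEMO_TSF_HIGH	0x608

static void demo_tsf_write(void __iomem *mmio, u64 tsf)
{
	/* Low register first, as the hardware requires for an
	 * atomic 64-bit update. */
	writel(lower_32_bits(tsf), mmio + DEMO_TSF_LOW);
	writel(upper_32_bits(tsf), mmio + DEMO_TSF_HIGH);
}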
if (b43_bus_host_is_sdio(dev->dev)) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
} else {
spin_lock_irq(&wl->hardirq_lock);
b43_do_beacon_update_trigger_work(dev);
spin_unlock_irq(&wl->hardirq_lock);
}
}
mutex_lock(&dev->wl->mutex);
b43_do_interrupt_thread(dev);
mutex_unlock(&dev->wl->mutex);
return IRQ_HANDLED;
spin_lock(&dev->wl->hardirq_lock);
ret = b43_do_interrupt(dev);
spin_unlock(&dev->wl->hardirq_lock);
return ret;
mutex_unlock(&wldev->wl->mutex);
return err ? err : count;
void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val);
}
void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2,
(val & 0xFFFF0000) >> 16);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1,
val = swab32(val);
b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset);
b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val);
}
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
b43legacy_shm_control_word(dev, routing,
(offset >> 2) + 1);
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA,
value & 0xffff);
return;
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value);
}
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
value);
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value);
}
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
status |= B43legacy_MACCTL_TBTTHOLD;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
}
static void b43legacy_time_unlock(struct b43legacy_wldev *dev)
u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0);
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH,
hi);
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW,
lo);
} else {
u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0);
b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3);
b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2);
b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1);
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0);
}
}
/* The handler might have updated the IRQ mask. */
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
dev->irq_mask);
spin_unlock_irq(&wl->irq_lock);
}
mutex_unlock(&wl->mutex);
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43legacy_controller_restart(dev, "DMA error");
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
return;
}
handle_irq_transmit_status(dev);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
spin_unlock(&dev->wl->irq_lock);
return ret;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* XXX: why? */
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset);
b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val);
}
u16 offset, u16 value)
{
b43legacy_write16(queue->dev, queue->mmio_base + offset, value);
B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK);
status |= B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK));
status &= ~B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
}
u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset)
void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset);
b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val);
}
void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset);
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val);
}
if (err)
b43legacyerr(wldev->wl, "Interference Mitigation not "
"supported by device\n");
spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
mutex_unlock(&wldev->wl->mutex);
_il_release_nic_access(struct il_priv *il)
{
_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * In above we are reading CSR_GP_CNTRL register, what will flush any
- * previous writes, but still want write, which clear MAC_ACCESS_REQ
- * bit, be performed on PCI bus before any other writes scheduled on
- * different CPUs (after we drop reg_lock).
- */
- mmiowb();
- * MAC_ACCESS_REQ bit to be performed before any other writes
- * scheduled on different CPUs (after we drop reg_lock).
- */
- mmiowb();
out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
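NOTE: these two hunks are the most explicit about what is being dropped: the CSR read flushes earlier posted writes, and the ordering of the final write against other CPUs is now inherited from spin_unlock_irqrestore(). A sketch of the resulting release path, with illustrative names:

#include <linux/bits.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define DEMO_MAC_ACCESS_REQ	BIT(0)	/* illustrative bit */

/* Hypothetical release helper modelled on the hunks above. */
static void demo_release_nic_access(void __iomem *gp_cntrl,
				    spinlock_t *reg_lock,
				    unsigned long *flags)
{
	u32 val = readl(gp_cntrl);	/* read back: flushes posted writes */

	writel(val & ~DEMO_MAC_ACCESS_REQ, gp_cntrl);
	/*
	 * spin_unlock_irqrestore() orders the write above against MMIO
	 * issued by the next holder of reg_lock, which is the guarantee
	 * the removed mmiowb() used to provide.
	 */
	spin_unlock_irqrestore(reg_lock, *flags);
}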
iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
/* Put the new value of the register */
iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
- /* Make sure the PCIe transactions are executed */
- mmiowb();
/* Unlock GASA registers operations */
spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
}
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
/* Limit address isn't specified since size is fixed for LUT */
}
idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
}
/* Set the route and send the data */
idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
/* Unlock the messages routing table */
spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_HDATA(perf->gidx),
upper_32_bits(data));
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_CMD(perf->gidx),
cmd);
ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
upper_32_bits(data));
/* This call shall trigger peer message event */
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
writel((__bfa)->iocfc.req_cq_pi[__reqq], \
(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
- mmiowb(); \
} while (0)
#define bfa_rspq_pi(__bfa, __rspq) \
(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
{
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
FCOE_CQE_TOGGLE_BIT_SHIFT);
msg = *((u32 *)rx_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
(tgt->sq_curr_toggle_bit << 15);
msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
writew(ep->qp.rq_prod_idx,
ep->qp.ctx_base + CNIC_RECV_DOORBELL);
}
bnx2i_ring_577xx_doorbell(bnx2i_conn);
} else
writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
spin_lock_irqsave(writeq_lock, flags);
__raw_writel((u32)(b), addr);
__raw_writel((u32)(b >> 32), (addr + 4));
spin_unlock_irqrestore(writeq_lock, flags);
}
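The hunk above is the classic 32-bit emulation of a 64-bit MMIO write: both halves go out under one dedicated lock so another CPU's halves cannot interleave. The same code with descriptive comments, as a self-contained sketch:

#include <linux/spinlock.h>
#include <linux/io.h>

/* 32-bit fallback for a 64-bit MMIO register; mirrors the hunk above. */
static void demo_writeq(u64 b, void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(writeq_lock, flags);
	__raw_writel((u32)b, addr);		/* low dword first */
	__raw_writel((u32)(b >> 32), addr + 4);	/* then high dword */
	spin_unlock_irqrestore(writeq_lock, flags);
}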
writel(*(u32 *)&dbell, fcport->p_doorbell);
/* Make sure SQ index is updated so f/w processes requests in order */
wmb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
* others they are two different assembly operations.
*/
wmb();
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
"prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
* See Documentation/driver-api/device-io.rst for more information.
*/
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
LEAVE("qla1280_isp_cmd");
}
else if (i % 2)
pr_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
msleep(20);
}
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writeb(value, bus->mmio + offset);
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writew(value, bus->mmio + offset);
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
writew((value & 0x0000FFFF), bus->mmio + offset);
writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2);
}
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#endif /* CONFIG_SSB_BLOCKIO */
writel(CHOR_CLRDONE,
mite->mmio + MITE_CHOR(mite_chan->channel));
}
spin_unlock_irqrestore(&mite->lock, flags);
return status;
}
mite_chan->done = 0;
/* arm */
writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);
CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
mite->mmio + MITE_CHCR(mite_chan->channel));
mite_chan->ring = NULL;
}
spin_unlock_irqrestore(&mite->lock, flags);
}
ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
NI660X_DMA_CFG_RESET(mite_channel),
NI660X_DMA_CFG);
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
writeb(primary_DMAChannel_bits(0) |
secondary_DMAChannel_bits(0),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
regs[reg] &= ~mask;
regs[reg] |= (value & mask);
ni_tio_write(counter, regs[reg] | transient, reg);
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
}
{
unsigned int val = (cmd << 16) | cmd;
writel(val, dev->mmio + reg);
}
unsigned int cmd, unsigned int reg)
{
writel(cmd << 16, dev->mmio + reg);
}
static bool s626_mc_test(struct comedi_device *dev,
memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n);
xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1);
iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL);
sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
/* TX4925 BUG WORKAROUND. Accessing SIOC register
* immediately after soft reset causes bus error. */
udelay(1);
while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
udelay(1);
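The TXX9 hunk above couples the soft-reset write with a delay and a bounded poll because, per the workaround comment, touching the register immediately after reset faults the bus. A sketch of that reset idiom with illustrative names:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_SIFCR	0x0c	/* illustrative offset */
#define DEMO_SWRST	0x8000	/* illustrative soft-reset bit */

static int demo_soft_reset(void __iomem *base)
{
	unsigned int tmout = 10000;

	writel(DEMO_SWRST, base + DEMO_SIFCR);
	udelay(1);	/* erratum: no access right after asserting reset */
	while ((readl(base + DEMO_SIFCR) & DEMO_SWRST) && --tmout)
		udelay(1);
	return tmout ? 0 : -ETIMEDOUT;
}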
ret = xdbc_start();
if (ret < 0)
goto reset_out;
ret = xdbc_start();
if (ret < 0) {
writel(0, &xdbc.xdbc_reg->control);
string_length = xhci_dbc_populate_strings(dbc->string);
xhci_dbc_init_contexts(xhci, string_length);
xhci_dbc_eps_init(xhci);
dbc->state = DS_INITIALIZED;
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY;
__raw_writel(ACCTL_ENLINK, base + ACCTLDIS);
udelay(1);
__raw_writel(ACCTL_ENLINK, base + ACCTLEN);
/* wait for primary codec ready status */