Use likely()/unlikely() directives to improve branch prediction: conditions expected to hold on the hot data path are annotated likely(), and error or exceptional conditions are annotated unlikely().

Signed-off-by: Rob Rice <rob.rice@broadcom.com>
Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
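
For reference, likely() and unlikely() are defined in include/linux/compiler.h as thin wrappers around GCC's __builtin_expect(), which tells the compiler which arm of a branch to lay out as the straight-line, fall-through path:

  #define likely(x)	__builtin_expect(!!(x), 1)
  #define unlikely(x)	__builtin_expect(!!(x), 0)

The !!(x) double negation normalizes any nonzero value to 1 so the expression compares cleanly against the expected constant.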
/* allocate a buffer for the dma rx status */
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
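
Note the gfp flags: the rx status buffer is allocated with GFP_ATOMIC because this happens on the I/O path, where sleeping is not allowed; the ring allocations later in the patch run at setup time and can use GFP_KERNEL. A minimal sketch of the atomic-path allocation with an assumed failure check (the check is not part of this hunk):

  vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
  if (unlikely(!vaddr))
      return -ENOMEM;    /* hypothetical error handling */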
struct pdc_state *pdcs = cookie;
u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
- if (intstatus & PDC_RCVINTEN_0)
+ if (likely(intstatus & PDC_RCVINTEN_0))
set_bit(PDC_RCVINT_0, &pdcs->intstatus);
/* Clear interrupt flags in device */
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
+ if (likely(pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK)))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
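
This hunk is the hard (top-half) handler of a hard-irq/threaded-irq pair: it only checks and acks the hardware and wakes the handler thread, so likely() marks the normal "this is our interrupt" path as hot. A minimal sketch of the pattern under assumed names (my_state, MY_INTSTATUS, and MY_INTMASK are hypothetical):

  static irqreturn_t my_hard_irq(int irq, void *cookie)
  {
      struct my_state *s = cookie;
      u32 status = ioread32(s->regs + MY_INTSTATUS);

      /* Hot path: the interrupt is ours; ack it in the device and
       * defer the real work to the handler thread. */
      if (likely(status & MY_INTMASK)) {
          iowrite32(status, s->regs + MY_INTSTATUS);
          return IRQ_WAKE_THREAD;
      }
      return IRQ_NONE;    /* shared line, not our interrupt */
  }

Both halves are registered together, typically via devm_request_threaded_irq(dev, irq, my_hard_irq, my_irq_thread, 0, dev_name(dev), s).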
bool rx_int;
rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
- if (pdcs && rx_int) {
+ if (likely(pdcs && rx_int)) {
dev_dbg(&pdcs->pdev->dev,
"%s() got irq %d with rx_int %s",
__func__, irq, rx_int ? "set" : "clear");
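
In the threaded half, test_and_clear_bit() atomically returns the previous value of the flag and clears it, so a receive event queued by the hard handler is consumed exactly once even if the two halves race. A one-line sketch of the idiom (pdc_receive_one() is a hypothetical stand-in for the driver's rx processing):

  /* Atomically fetch-and-clear the rx flag set in hard-irq context. */
  if (likely(test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus)))
      pdc_receive_one(pdcs);    /* hypothetical rx processing */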
/* Allocate tx ring */
tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
- if (!tx.vbase) {
+ if (unlikely(!tx.vbase)) {
err = -ENOMEM;
goto done;
}
/* Allocate rx ring */
rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
- if (!rx.vbase) {
+ if (unlikely(!rx.vbase)) {
err = -ENOMEM;
goto fail_dealloc;
}
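
Both rings come from a DMA pool, and an allocation failure is the cold path, hence unlikely(). A minimal sketch of the dma_pool lifecycle these hunks assume (the pool name, RING_SIZE, and RING_ALIGN are hypothetical):

  struct dma_pool *pool;
  dma_addr_t dmabase;
  void *vbase;

  /* One-time setup: a pool of fixed-size, aligned, DMA-coherent blocks. */
  pool = dma_pool_create("my-rings", dev, RING_SIZE, RING_ALIGN, 0);
  if (!pool)
      return -ENOMEM;

  /* vbase is the CPU view of the block, dmabase the device view. */
  vbase = dma_pool_zalloc(pool, GFP_KERNEL, &dmabase);
  if (unlikely(!vbase)) {    /* allocation failure: cold path */
      dma_pool_destroy(pool);
      return -ENOMEM;
  }

  /* ... use the ring ... */

  dma_pool_free(pool, vbase, dmabase);
  dma_pool_destroy(pool);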
u32 tx_desc_req;
u32 rx_desc_req;
- if (mssg->type != BRCM_MESSAGE_SPU)
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
return -ENOTSUPP;
src_nent = sg_nents(mssg->spu.src);
- if (src_nent) {
+ if (likely(src_nent)) {
nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
- if (nent == 0)
+ if (unlikely(nent == 0))
return -EIO;
}
dst_nent = sg_nents(mssg->spu.dst);
- if (dst_nent) {
+ if (likely(dst_nent)) {
nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
DMA_FROM_DEVICE);
- if (nent == 0) {
+ if (unlikely(nent == 0)) {
dma_unmap_sg(dev, mssg->spu.src, src_nent,
DMA_TO_DEVICE);
return -EIO;
tx_desc_req = pdc_desc_count(mssg->spu.src);
rx_desc_req = pdc_desc_count(mssg->spu.dst);
- if (pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1))
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
return -ENOSPC;
/* Create rx descriptors to SPU catch response */
err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
err |= pdc_tx_list_final(pdcs); /* initiate transfer */
dev_err(&pdcs->pdev->dev,
"%s failed with error %d", __func__, err);