S: Maintained
F: drivers/scsi/NCR_D700.*
+NCSI LIBRARY:
+M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S: Maintained
+F: net/ncsi/
+
NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
- enic_rfs_timer_start(enic);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
enic_rfs_timer_stop(enic);
spin_lock_bh(&enic->rfs_h.lock);
- enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
enic_delfltr(enic, n->fltr_id);
hlist_del(&n->node);
kfree(n);
+ enic->rfs_h.free++;
}
}
spin_unlock_bh(&enic->rfs_h.lock);
{
struct enic *enic = netdev_priv(netdev);
unsigned int i;
- int err;
+ int err, ret;
err = enic_request_intr(enic);
if (err) {
vnic_intr_unmask(&enic->intr[i]);
enic_notify_timer_start(enic);
- enic_rfs_flw_tbl_init(enic);
+ enic_rfs_timer_start(enic);
return 0;
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
- if (err)
- return err;
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ ret = vnic_rq_disable(&enic->rq[i]);
+ if (!ret)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
}
enic_dev_notify_unset(enic);
err_out_free_intr:
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
+ enic_rfs_flw_tbl_init(enic);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
if (unlikely(nd->state != ncsi_dev_state_functional))
return;
- netdev_info(nd->dev, "NCSI interface %s\n",
- nd->link_up ? "up" : "down");
+ netdev_dbg(nd->dev, "NCSI interface %s\n",
+ nd->link_up ? "up" : "down");
}
static void ftgmac100_setup_clk(struct ftgmac100 *priv)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(I40E_SKB_PAD +
- (xdp->data_end -
- xdp->data_hard_start));
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
if (metasize)
skb_metadata_set(skb, metasize);
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
- DP_ERR(p_hwfn,
- "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
- id, app_prio_bitmap);
+ DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
return false;
}
*cap = 0x80;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+ *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+ DCB_CAP_DCBX_STATIC);
break;
default:
*cap = false;
if (!dcbx_info)
return 0;
- if (dcbx_info->operational.enabled)
- mode |= DCB_CAP_DCBX_LLD_MANAGED;
if (dcbx_info->operational.ieee)
mode |= DCB_CAP_DCBX_VER_IEEE;
if (dcbx_info->operational.cee)
skb = build_skb(buffer->data, 0);
if (!skb) {
- rc = -ENOMEM;
- goto out_post;
+ DP_INFO(cdev, "Failed to build SKB\n");
+ kfree(buffer->data);
+ goto out_post1;
}
data->u.placement_offset += NET_SKB_PAD;
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
data->opaque_data_0,
data->opaque_data_1);
+ } else {
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+ QED_MSG_LL2 | QED_MSG_STORAGE),
+ "Dropping the packet\n");
+ kfree(buffer->data);
}
+out_post1:
/* Update Buffer information and update FW producer */
buffer->data = new_data;
buffer->phys_addr = new_phys_addr;
/* Fastpath interrupts */
for (j = 0; j < 64; j++) {
if ((0x2ULL << j) & status) {
- hwfn->simd_proto_handler[j].func(
- hwfn->simd_proto_handler[j].token);
+ struct qed_simd_fp_handler *p_handler =
+ &hwfn->simd_proto_handler[j];
+
+ if (p_handler->func)
+ p_handler->func(p_handler->token);
+ else
+ DP_NOTICE(hwfn,
+ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+ j, status);
+
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
- depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+ depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
struct device *dev;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ocp_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
struct tse_pcs pcs;
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
/* Assert reset to the enet controller before changing the phy mode */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (dwmac->stmmac_rst)
- reset_control_deassert(dwmac->stmmac_rst);
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
if (phymode == PHY_INTERFACE_MODE_SGMII) {
if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
dev_err(dwmac->dev, "Unable to initialize TSE PCS");
goto err_remove_config_dt;
}
+ dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+ if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+ ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+ dev_err(dev, "error getting reset control of ocp %d\n", ret);
+ goto err_remove_config_dt;
+ }
+
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+
ret = socfpga_dwmac_parse_data(dwmac, dev);
if (ret) {
dev_err(dev, "Unable to parse OF data\n");
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phydev;
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
+ /*
+	 * Half-duplex mode is not supported with multiqueue;
+	 * half-duplex can only work with a single queue.
+ */
+ if (tx_cnt > 1)
+ phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+
/*
* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
#include <linux/sungem_phy.h>
#include "sungem.h"
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
#define DEFAULT_MSG (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
writel(((5 & RXDMA_BLANK_IPKTS) |
struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
- __sum16 csum;
if (netif_msg_rx_status(gp))
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
skb = copy_skb;
}
- csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
- skb->csum = csum_unfold(csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
+ __sum16 csum;
+
+ csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->csum = csum_unfold(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
skb->protocol = eth_type_trans(skb, gp->dev);
napi_gro_receive(&gp->napi, skb);
writel(0, gp->regs + TXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
pci_set_drvdata(pdev, dev);
/* We can do scatter/gather and HW checksum */
- dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->features = dev->hw_features;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
return -EOPNOTSUPP;
}
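+/* Used with bus_find_device(): match the first MDIO device whose name
+ * starts with "davinci_mdio" (this also covers instance-suffixed names
+ * such as "davinci_mdio.0").
+ */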
+static int match_first_device(struct device *dev, void *data)
+{
+ return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
/**
* emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- phy = bus_find_device_by_name(&mdio_bus_type, NULL,
- "davinci_mdio");
+ /* NOTE: we can't use bus_find_device_by_name() here because
+ * the device name is not guaranteed to be 'davinci_mdio'. On
+ * some systems it can be 'davinci_mdio.0' so we need to use
+ * strncmp() against the first part of the string to correctly
+ * match it.
+ */
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
if (phy) {
priv->phy_id = dev_name(phy);
if (!priv->phy_id || !*priv->phy_id)
static const char banner[] __initconst = KERN_INFO \
"AX.25: bpqether driver version 004\n";
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
bpq->ethdev = edev;
bpq->axdev = ndev;
- memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
- memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+ eth_broadcast_addr(bpq->dest_addr);
+ eth_broadcast_addr(bpq->acpt_addr);
err = register_netdevice(ndev);
if (err)
{
ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
netif_addr_lock_bh(failover_dev);
dev_uc_sync_multiple(slave_dev, failover_dev);
- dev_uc_sync_multiple(slave_dev, failover_dev);
+ dev_mc_sync_multiple(slave_dev, failover_dev);
netif_addr_unlock_bh(failover_dev);
err = vlan_vids_add_by_dev(slave_dev, failover_dev);
atomic_set(&ctx->stop, 1);
- if (hrtimer_active(&ctx->tx_timer))
- hrtimer_cancel(&ctx->tx_timer);
+ hrtimer_cancel(&ctx->tx_timer);
tasklet_kill(&ctx->bh);
}
pct = &sysoff->ts[0];
for (i = 0; i < sysoff->n_samples; i++) {
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
pct->nsec = ts.tv_nsec;
pct++;
}
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
tmr_ctrl =
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
+ unsigned int acct_truesize; /* truesize accounted to vcc */
};
#define VCC_HTABLE_SIZE 32
void atm_dev_release_vccs(struct atm_dev *dev);
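+/* Charge an outgoing skb to the VCC's socket write allocation and remember
+ * the truesize and ATM options used, so atm_pop_raw() can later release
+ * exactly what was accounted.
+ */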
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+	 * Because ATM skbs may not belong to a sock (and we don't
+	 * necessarily want them to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
/* Map specifics */
struct xdp_buff;
+struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
return 0;
}
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
};
struct bpf_binary_header {
- unsigned int pages;
+ u16 pages;
+ u16 locked:1;
u8 image[];
};
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+ if (set_memory_ro((unsigned long)fp, fp->pages))
+ fp->locked = 0;
+#endif
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
if (fp->locked) {
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
		/* In case set_memory_rw() fails, we want to be the first
		 * to crash here instead of some random place later on.
		 */
fp->locked = 0;
}
+#endif
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ hdr->locked = 1;
+ if (set_memory_ro((unsigned long)hdr, hdr->pages))
+ hdr->locked = 0;
+#endif
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ if (hdr->locked) {
+ WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+ /* In case set_memory_rw() fails, we want to be the first
+ * to crash here instead of some random place later on.
+ */
+ hdr->locked = 0;
+ }
+#endif
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
return (void *)addr;
}
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
+{
+ if (!fp->locked)
+ return -ENOLCK;
+ if (fp->jited) {
+ const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+ if (!hdr->locked)
+ return -ENOLCK;
+ }
+
+ return 0;
+}
+#endif
+
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
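+/* Sanity checks before a generic XDP redirect hands an skb to @fwd: the
+ * target device must be up and the frame must fit within its MTU plus the
+ * L2 header and a VLAN tag.
+ */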
+static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
+ struct net_device *fwd)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (skb->len > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
}
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
unused:3;
struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
};
struct rt6_info {
}
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
- fib6_info_destroy(f6i);
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
enum fib6_walk_state {
return prog_adj;
}
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+ int i;
+
+ for (i = 0; i < fp->aux->func_cnt; i++)
+ bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+ bpf_prog_kallsyms_del_subprogs(fp);
+ bpf_prog_kallsyms_del(fp);
+}
+
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
bpf_fill_ill_insns(hdr, size);
hdr->pages = size / PAGE_SIZE;
+ hdr->locked = 0;
+
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
start = (get_random_int() % hole) & ~(alignment - 1);
return 0;
}
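+/* Verify that the main program and all of its JITed subprograms ended up
+ * with read-only pages; a no-op when the arch cannot change page attributes.
+ */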
+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ int i, err;
+
+ for (i = 0; i < fp->aux->func_cnt; i++) {
+ err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+ if (err)
+ return err;
+ }
+
+ return bpf_prog_check_pages_ro_single(fp);
+#endif
+ return 0;
+}
+
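+/* Pick the interpreter entry point matching the program's stack depth, or a
+ * stub that warns and returns 0 when the interpreter is compiled out
+ * (CONFIG_BPF_JIT_ALWAYS_ON).
+ */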
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+ fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+ fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
/**
* bpf_prog_select_runtime - select exec runtime for BPF program
* @fp: bpf_prog populated with internal BPF program
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
- u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+ /* In case of BPF to BPF calls, verifier did all the prep
+ * work with regards to JITing, etc.
+ */
+ if (fp->bpf_func)
+ goto finalize;
- fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
- fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+ bpf_prog_select_func(fp);
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
if (*err)
return fp;
}
+
+finalize:
bpf_prog_lock_ro(fp);
/* The tail call compatibility check can only be done at
* all eBPF JITs might immediately support all features.
*/
*err = bpf_check_tail_call(fp);
-
+ if (*err)
+ return fp;
+
+ /* Checkpoint: at this point onwards any cBPF -> eBPF or
+ * native eBPF program is read-only. If we failed to change
+ * the page attributes (e.g. allocation failure from
+ * splitting large pages), then reject the whole program
+ * in order to guarantee not ending up with any W+X pages
+ * from BPF side in kernel.
+ */
+ *err = bpf_prog_check_pages_ro_locked(fp);
return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
return bq_enqueue(dst, xdpf, dev_rx);
}
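+/* Generic XDP redirect into a devmap entry: validate the target netdev and
+ * frame size, then transmit the skb via generic_xdp_tx().
+ */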
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ int err;
+
+ err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+ if (unlikely(err))
+ return err;
+ skb->dev = dst->dev;
+ generic_xdp_tx(skb, xdp_prog);
+
+ return 0;
+}
+
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
- int i;
-
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
-
- for (i = 0; i < prog->aux->func_cnt; i++)
- bpf_prog_kallsyms_del(prog->aux->func[i]);
- bpf_prog_kallsyms_del(prog);
+ bpf_prog_kallsyms_del_all(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
if (err < 0)
goto free_used_maps;
- /* eBPF program is ready to be JITed */
- if (!prog->bpf_func)
- prog = bpf_prog_select_runtime(prog, &err);
+ prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
goto free_used_maps;
return err;
free_used_maps:
+ bpf_prog_kallsyms_del_subprogs(prog);
free_used_maps(prog->aux);
free_prog:
bpf_prog_uncharge_memlock(prog);
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
- refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+ atm_account_tx(atmvcc, skb);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
memcpy(here, llc_oui, sizeof(llc_oui));
((__be16 *) here)[3] = skb->protocol;
}
- refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = vcc->atm_options;
+ atm_account_tx(vcc, skb);
entry->vccs->last_use = jiffies;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
goto out;
}
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ atm_account_tx(vcc, skb);
skb->dev = NULL; /* for paths shared with net_device interfaces */
- ATM_SKB(skb)->atm_options = vcc->atm_options;
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
kfree_skb(skb);
error = -EFAULT;
struct net_device *dev = skb->dev;
ATM_SKB(skb)->vcc = vcc;
- ATM_SKB(skb)->atm_options = vcc->atm_options;
+ atm_account_tx(vcc, skb);
- refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
if (vcc->send(vcc, skb) < 0) {
dev->stats.tx_dropped++;
return;
sizeof(struct llc_snap_hdr));
}
- refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+ atm_account_tx(entry->shortcut, skb);
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
mpc->in_ops->put(entry);
return 1;
}
- refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+ atm_account_tx(vcc, skb);
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
struct sock *sk = sk_atm(vcc);
pr_debug("(%d) %d -= %d\n",
- vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
- WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+ vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+ WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
--- /dev/null
+bpfilter_umh
# which bpfilter_kern.c passes further into umh blob loader at run-time
quiet_cmd_copy_umh = GEN $@
cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
- $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
- -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
+ $(OBJCOPY) -I binary \
+ `LC_ALL=C objdump -f net/bpfilter/bpfilter_umh \
+ |awk -F' |,' '/file format/{print "-O",$$NF} \
+ /^architecture:/{print "-B",$$2}'` \
--rename-section .data=.init.rodata $< $@
$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (dev_get_valid_name(net, dev, pat) < 0)
+ err = dev_get_valid_name(net, dev, pat);
+ if (err < 0)
goto out;
}
dev_close(dev);
/* And unlink it from device chain */
- err = -ENODEV;
unlist_netdevice(dev);
synchronize_net();
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
- unsigned int len;
-
- if (unlikely(!(fwd->flags & IFF_UP)))
- return -ENETDOWN;
-
- len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
- if (skb->len > len)
- return -EMSGSIZE;
-
- return 0;
-}
-
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
}
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
- if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+ struct bpf_dtab_netdev *dst = fwd;
+
+ err = dev_map_generic_redirect(dst, skb, xdp_prog);
+ if (unlikely(err))
goto err;
- skb->dev = fwd;
- generic_xdp_tx(skb, xdp_prog);
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
struct xdp_sock *xs = fwd;
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);
- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
cork->fragsize = ip_sk_use_pmtu(sk) ?
dst_mtu(&rt->dst) : rt->dst.dev->mtu;
- cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+ cork->gso_size = sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
cork->dst = &rt->dst;
cork->length = 0;
cork->ttl = ipc->ttl;
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);
- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
return f6i;
}
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
{
+ struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
struct rt6_exception_bucket *bucket;
struct dst_metrics *m;
kfree(f6i);
}
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
static struct fib6_node *node_alloc(struct net *net)
{
if (mtu < IPV6_MIN_MTU)
return -EINVAL;
cork->base.fragsize = mtu;
- cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+ cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
cork->base.flags |= IPCORK_ALLFRAG;
ncm->data[2] = data;
ncm->data[4] = ntohl(lsc->oem_status);
- netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
- nc->id, data & 0x1 ? "up" : "down");
+ netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+ nc->id, data & 0x1 ? "up" : "down");
chained = !list_empty(&nc->link);
state = nc->state;
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
spin_unlock_irqrestore(&nc->lock, flags);
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: host driver %srunning on channel %u\n",
- ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: host driver %srunning on channel %u\n",
+ ncm->data[3] & 0x1 ? "" : "not ", nc->id);
return 0;
}
}
break;
case ncsi_dev_state_config_done:
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: channel %u config done\n", nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+ nc->id);
spin_lock_irqsave(&nc->lock, flags);
if (nc->reconfigure_needed) {
/* This channel's configuration has been updated
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Dirty NCSI channel state reset\n");
+ netdev_dbg(dev, "Dirty NCSI channel state reset\n");
ncsi_process_next_channel(ndp);
break;
}
} else {
hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
- netdev_warn(ndp->ndev.dev,
- "NCSI: channel %u link down after config\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: channel %u link down after config\n",
+ nc->id);
}
spin_unlock_irqrestore(&nc->lock, flags);
}
ncm = &found->modes[NCSI_MODE_LINK];
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: Channel %u added to queue (link %s)\n",
- found->id, ncm->data[2] & 0x1 ? "up" : "down");
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: Channel %u added to queue (link %s)\n",
+ found->id, ncm->data[2] & 0x1 ? "up" : "down");
out:
spin_lock_irqsave(&ndp->lock, flags);
switch (old_state) {
case NCSI_CHANNEL_INACTIVE:
ndp->ndev.state = ncsi_dev_state_config;
- netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+ nc->id);
ncsi_configure_channel(ndp);
break;
case NCSI_CHANNEL_ACTIVE:
ndp->ndev.state = ncsi_dev_state_suspend;
- netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+ nc->id);
ncsi_suspend_channel(ndp);
break;
default:
return ncsi_choose_active_channel(ndp);
}
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: No more channels to process\n");
ncsi_report_link(ndp, false);
return -ENODEV;
}
if ((ndp->ndev.state & 0xff00) ==
ncsi_dev_state_config ||
!list_empty(&nc->link)) {
- netdev_printk(KERN_DEBUG, nd->dev,
- "NCSI: channel %p marked dirty\n",
- nc);
+ netdev_dbg(nd->dev,
+ "NCSI: channel %p marked dirty\n",
+ nc);
nc->reconfigure_needed = true;
}
spin_unlock_irqrestore(&nc->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_printk(KERN_DEBUG, nd->dev,
- "NCSI: kicked channel %p\n", nc);
+ netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
n++;
}
}
list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
n_vids++;
if (vlan->vid == vid) {
- netdev_printk(KERN_DEBUG, dev,
- "NCSI: vid %u already registered\n", vid);
+ netdev_dbg(dev, "NCSI: vid %u already registered\n",
+ vid);
return 0;
}
}
vlan->vid = vid;
list_add_rcu(&vlan->list, &ndp->vlan_vids);
- netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+ netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
found = ncsi_kick_channels(ndp) != 0;
/* Remove the VLAN id from our internal list */
list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
if (vlan->vid == vid) {
- netdev_printk(KERN_DEBUG, dev,
- "NCSI: vid %u found, removing\n", vid);
+ netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
list_del_rcu(&vlan->list);
found = true;
kfree(vlan);
}
}
- netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+ netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
spin_unlock_bh(&ife->tcf_lock);
p = rcu_dereference_protected(ife->params, 1);
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
/* under ife->tcf_lock for existing action */
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
- ife->tcf_action = parm->action;
-
if (parm->flags & IFE_ENCODE) {
if (daddr)
ether_addr_copy(p->eth_dst, daddr);
NULL, NULL);
if (err) {
metadata_parse_err:
- if (exists)
- tcf_idr_release(*a, bind);
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a);
+ tcf_idr_release(*a, bind);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
err = use_all_metadata(ife);
if (err) {
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a);
+ tcf_idr_release(*a, bind);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
}
}
+ ife->tcf_action = parm->action;
if (exists)
spin_unlock_bh(&ife->tcf_lock);
struct sk_buff **to_free)
{
qdisc_drop(skb, sch, to_free);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
u64 addr;
int err;
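+	/* Only accept frames that arrived on the dev/queue this socket is bound to */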
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ return -EINVAL;
+
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
len > xs->umem->chunk_size_nohr) {
xs->rx_dropped++;
if (perf_query_supported)
goto out;
- fd = open(bin_name, O_RDONLY);
+ fd = open("/", O_RDONLY);
if (fd < 0) {
- p_err("perf_query_support: %s", strerror(errno));
+ p_err("perf_query_support: cannot open directory \"/\" (%s)",
+ strerror(errno));
goto out;
}
}
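+	/* Include the nanosecond parts of both clock samples so the conversion
+	 * to seconds does not drop up to a second of load-time offset.
+	 */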
wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
- nsecs / 1000000000;
+ (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+ 1000000000;
+
if (!localtime_r(&wallclock_secs, &load_tm)) {
snprintf(buf, size, "%llu", nsecs / 1000000000);
CONFIG_NETDEVSIM=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
def bpftool_prog_list(expected=None, ns=""):
_, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+ # Remove the base progs
+ for p in base_progs:
+ if p in progs:
+ progs.remove(p)
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs loaded, expected %d" %
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+ # Remove the base maps
+ for m in base_maps:
+ if m in maps:
+ maps.remove(m)
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
# Check tools
ret, progs = bpftool("prog", fail=False)
skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
# Check netdevsim
ret, out = cmd("modprobe netdevsim", fail=False)
test_xfrm_tunnel()
{
config_device
- #tcpdump -nei veth1 ip &
- output=$(mktemp)
- cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
- setup_xfrm_tunnel
+ > /sys/kernel/debug/tracing/trace
+ setup_xfrm_tunnel
tc qdisc add dev veth1 clsact
tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
sec xfrm_get_state
ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
sleep 1
- grep "reqid 1" $output
+ grep "reqid 1" /sys/kernel/debug/tracing/trace
check_err $?
- grep "spi 0x1" $output
+ grep "spi 0x1" /sys/kernel/debug/tracing/trace
check_err $?
- grep "remote ip 0xac100164" $output
+ grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
check_err $?
cleanup
if [ $ret -ne 0 ]; then
- echo -e ${RED}"FAIL: xfrm tunnel"${NC}
- return 1
- fi
- echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+ echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+ return 1
+ fi
+ echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
}
attach_bpf()
ip link del ip6geneve11 2> /dev/null
ip link del erspan11 2> /dev/null
ip link del ip6erspan11 2> /dev/null
+ ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+ ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+ ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+ ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
}
cleanup_exit()
check()
{
- ip link help $1 2>&1 | grep -q "^Usage:"
+ ip link help 2>&1 | grep -q "\s$1\s"
if [ $? -ne 0 ];then
echo "SKIP $1: iproute2 not support"
cleanup