29 files changed, 733 insertions, 331 deletions
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 72c85cd34a4ee8..a1a177713d99d6 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -872,10 +872,7 @@ static void b53_enable_stp(struct b53_device *dev) static u16 b53_default_pvid(struct b53_device *dev) { - if (is5325(dev) || is5365(dev)) - return 1; - else - return 0; + return 0; } static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) @@ -1699,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) - return -EOPNOTSUPP; - /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of * receiving VLAN tagged frames at all, we can still allow the port to * be configured for egress untagged. @@ -1853,19 +1847,24 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) static void b53_arl_read_entry_25(struct b53_device *dev, struct b53_arl_entry *ent, u8 idx) { + u8 vid_entry; u64 mac_vid; + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), + &vid_entry); b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); + b53_arl_to_entry_25(ent, mac_vid, vid_entry); } static void b53_arl_write_entry_25(struct b53_device *dev, const struct b53_arl_entry *ent, u8 idx) { + u8 vid_entry; u64 mac_vid; - b53_arl_from_entry_25(&mac_vid, ent); + b53_arl_from_entry_25(&mac_vid, &vid_entry, ent); + b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry); b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); } @@ -1966,8 +1965,12 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, /* Perform a read for the given MAC and VID */ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); - if (!is5325m(dev)) - b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + if (!is5325m(dev)) { + if (is5325(dev) || is5365(dev)) + b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + else + b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + } /* Issue a read operation for this MAC */ ret = b53_arl_rw_op(dev, 1); @@ -2115,20 +2118,12 @@ static void b53_arl_search_read_25(struct b53_device *dev, u8 idx, struct b53_arl_entry *ent) { u64 mac_vid; + u8 ext; + b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext); b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); -} - -static void b53_arl_search_read_65(struct b53_device *dev, u8 idx, - struct b53_arl_entry *ent) -{ - u64 mac_vid; - - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); + b53_arl_search_to_entry_25(ent, mac_vid, ext); } static void b53_arl_search_read_89(struct b53_device *dev, u8 idx, @@ -2742,12 +2737,6 @@ static const struct b53_arl_ops b53_arl_ops_25 = { .arl_search_read = b53_arl_search_read_25, }; -static const struct b53_arl_ops b53_arl_ops_65 = { - .arl_read_entry = b53_arl_read_entry_25, - .arl_write_entry = b53_arl_write_entry_25, - .arl_search_read = b53_arl_search_read_65, -}; - static const struct b53_arl_ops b53_arl_ops_89 = { .arl_read_entry = b53_arl_read_entry_89, .arl_write_entry = b53_arl_write_entry_89, @@ -2810,7 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, - .arl_ops = &b53_arl_ops_65, + .arl_ops = &b53_arl_ops_25, }, { 
.chip_id = BCM5389_DEVICE_ID, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 2bfd0e7c95c986..bd6849e5bb9390 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -341,16 +341,18 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent, } static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent, - u64 mac_vid) + u64 mac_vid, u8 vid_entry) { memset(ent, 0, sizeof(*ent)); - ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) & - ARLTBL_DATA_PORT_ID_MASK_25; ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); ent->is_age = !!(mac_vid & ARLTBL_AGE_25); ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); u64_to_ether_addr(mac_vid, ent->mac); - ent->vid = mac_vid >> ARLTBL_VID_S_65; + ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >> + ARLTBL_DATA_PORT_ID_S_25; + if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; + ent->vid = vid_entry; } static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent, @@ -379,20 +381,22 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, *fwd_entry |= ARLTBL_AGE; } -static inline void b53_arl_from_entry_25(u64 *mac_vid, +static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry, const struct b53_arl_entry *ent) { *mac_vid = ether_addr_to_u64(ent->mac); - *mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) << - ARLTBL_DATA_PORT_ID_S_25; - *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) << - ARLTBL_VID_S_65; + if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25) + *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25; + else + *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) & + ARLTBL_DATA_PORT_ID_MASK_25; if (ent->is_valid) *mac_vid |= ARLTBL_VALID_25; if (ent->is_static) *mac_vid |= ARLTBL_STATIC_25; if (ent->is_age) *mac_vid |= ARLTBL_AGE_25; + *vid_entry = ent->vid; } static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, @@ -409,6 +413,24 @@ static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, *fwd_entry |= ARLTBL_AGE_89; } +static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent, + u64 mac_vid, u8 ext) +{ + memset(ent, 0, sizeof(*ent)); + ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); + ent->is_age = !!(mac_vid & ARLTBL_AGE_25); + ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >> + ARL_SRCH_RSLT_VID_S_25; + ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >> + ARL_SRCH_RSLT_PORT_ID_S_25; + if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII)) + ent->port |= BIT(B53_CPU_PORT_25); + else if (!is_multicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; +} + static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent, u64 mac_vid, u16 fwd_entry) { diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 69ebbec932f652..54a278db67c9fd 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -329,11 +329,9 @@ #define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 -#define ARLTBL_VID_MASK_25 0xff #define ARLTBL_VID_MASK 0xfff #define ARLTBL_DATA_PORT_ID_S_25 48 -#define ARLTBL_DATA_PORT_ID_MASK_25 0xf -#define ARLTBL_VID_S_65 53 +#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48) #define ARLTBL_AGE_25 BIT_ULL(61) #define ARLTBL_STATIC_25 
BIT_ULL(62) #define ARLTBL_VALID_25 BIT_ULL(63) @@ -353,6 +351,9 @@ #define ARLTBL_STATIC_89 BIT(14) #define ARLTBL_VALID_89 BIT(15) +/* BCM5325/BCM5365 ARL Table VID Entry N Registers (8 bit) */ +#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30) + /* Maximum number of bin entries in the ARL for all switches */ #define B53_ARLTBL_MAX_BIN_ENTRIES 4 @@ -376,10 +377,16 @@ #define B53_ARL_SRCH_RSLT_MACVID_89 0x33 #define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34 -/* Single register search result on 5325 */ +/* Single register search result on 5325/5365 */ #define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 -/* Single register search result on 5365 */ -#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30 +#define ARL_SRCH_RSLT_PORT_ID_S_25 48 +#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48) +#define ARL_SRCH_RSLT_VID_S_25 53 +#define ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53) + +/* BCM5325/5365 Search result extend register (8 bit) */ +#define B53_ARL_SRCH_RSLT_EXT_25 0x2c +#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2) /* ARL Search Data Result (32 bit) */ #define B53_ARL_SRCH_RSTL_0 0x68 diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c index ebfd34f723143f..12e1bd5a106172 100644 --- a/drivers/net/dsa/yt921x.c +++ b/drivers/net/dsa/yt921x.c @@ -2098,6 +2098,117 @@ yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port, return res; } +static int +yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port, + const struct switchdev_mst_state *st) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + switch (st->state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge, + const struct switchdev_vlan_msti *msti) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u64 mask64; + u64 ctrl64; + int res; + + if (!msti->vid) + return -EINVAL; + if (!msti->msti || msti->msti >= YT921X_MSTI_NUM) + return -EINVAL; + + mask64 = YT921X_VLAN_CTRL_STP_ID_M; + ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti); + + mutex_lock(&priv->reg_lock); + res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid), + mask64, ctrl64); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void +yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct dsa_port *dp = dsa_to_port(ds, port); + struct device *dev = to_device(priv); + bool learning; + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + learning = false; + switch (state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + learning = dp->learning; + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + learning = dp->learning; + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + 
mutex_lock(&priv->reg_lock); + do { + res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl); + if (res) + break; + + mask = YT921X_PORT_LEARN_DIS; + ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0; + res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port), + mask, ctrl); + } while (0); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for", + port, res); +} + static int yt921x_port_down(struct yt921x_priv *priv, int port) { u32 mask; @@ -2783,6 +2894,10 @@ static const struct dsa_switch_ops yt921x_dsa_switch_ops = { .port_bridge_flags = yt921x_dsa_port_bridge_flags, .port_bridge_leave = yt921x_dsa_port_bridge_leave, .port_bridge_join = yt921x_dsa_port_bridge_join, + /* mst */ + .port_mst_state_set = yt921x_dsa_port_mst_state_set, + .vlan_msti_set = yt921x_dsa_vlan_msti_set, + .port_stp_state_set = yt921x_dsa_port_stp_state_set, /* port */ .get_tag_protocol = yt921x_dsa_get_tag_protocol, .phylink_get_caps = yt921x_dsa_phylink_get_caps, @@ -2855,6 +2970,8 @@ static int yt921x_mdio_probe(struct mdio_device *mdiodev) ds->assisted_learning_on_cpu_port = true; ds->priv = priv; ds->ops = &yt921x_dsa_switch_ops; + ds->ageing_time_min = 1 * 5000; + ds->ageing_time_max = U16_MAX * 5000; ds->phylink_mac_ops = &yt921x_phylink_mac_ops; ds->num_ports = YT921X_PORT_NUM; diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h index 44719d841d40e8..61bb0ab3b09a38 100644 --- a/drivers/net/dsa/yt921x.h +++ b/drivers/net/dsa/yt921x.h @@ -274,6 +274,13 @@ #define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port) #define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port)) #define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port) +#define YT921X_STPn(n) (0x18038c + 4 * (n)) +#define YT921X_STP_PORTn_M(port) GENMASK(2 * (port) + 1, 2 * (port)) +#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port))) +#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0) +#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 1) +#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2) +#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3) #define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port)) #define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22) #define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21) @@ -382,23 +389,23 @@ #define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0) #define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan)) -#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK(50, 40) +#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40) #define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x)) -#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT((port) + 40) -#define YT921X_VLAN_CTRL_STP_ID_M GENMASK(39, 36) +#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40) +#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36) #define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x)) -#define YT921X_VLAN_CTRL_SVLAN_EN BIT(35) -#define YT921X_VLAN_CTRL_FID_M GENMASK(34, 23) +#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35) +#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23) #define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x)) -#define YT921X_VLAN_CTRL_LEARN_DIS BIT(22) -#define YT921X_VLAN_CTRL_INT_PRI_EN BIT(21) -#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK(20, 18) -#define YT921X_VLAN_CTRL_PORTS_M GENMASK(17, 7) +#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22) +#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21) +#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18) +#define 
YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7) #define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x)) -#define YT921X_VLAN_CTRL_PORTn(port) BIT((port) + 7) -#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT(6) -#define YT921X_VLAN_CTRL_METER_EN BIT(5) -#define YT921X_VLAN_CTRL_METER_ID_M GENMASK(4, 0) +#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7) +#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6) +#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5) +#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0) #define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */ #define YT921X_TPID_IGR_TPID_M GENMASK(15, 0) @@ -449,6 +456,8 @@ enum yt921x_fdb_entry_status { YT921X_FDB_ENTRY_STATUS_STATIC = 7, }; +#define YT921X_MSTI_NUM 16 + #define YT9215_MAJOR 0x9002 #define YT9218_MAJOR 0x9001 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0653e69f0ef7a6..3ddd896d698774 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -367,10 +367,11 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data) static void xgbe_isr_bh_work(struct work_struct *work) { struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work); + unsigned int mac_isr, mac_tssr, mac_mdioisr; struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_channel *channel; + bool per_ch_irq, ti, ri, rbu, fbe; unsigned int dma_isr, dma_ch_isr; - unsigned int mac_isr, mac_tssr, mac_mdioisr; + struct xgbe_channel *channel; unsigned int i; /* The DMA interrupt status register also reports MAC and MTL @@ -384,43 +385,73 @@ static void xgbe_isr_bh_work(struct work_struct *work) netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); for (i = 0; i < pdata->channel_count; i++) { + bool schedule_napi = false; + struct napi_struct *napi; + if (!(dma_isr & (1 << i))) continue; channel = pdata->channel[i]; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + + /* Precompute flags once */ + ti = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI); + ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI); + rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU); + fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", i, dma_ch_isr); - /* The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. + per_ch_irq = pdata->per_channel_irq; + + /* + * Decide which NAPI to use and whether to schedule: + * - When not using per-channel IRQs: schedule on global NAPI + * if TI or RI are set. + * - RBU should also trigger NAPI (either per-channel or global) + * to allow refill. */ - if (!pdata->per_channel_irq && - (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || - XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { - if (napi_schedule_prep(&pdata->napi)) { - /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (!per_ch_irq && (ti || ri)) + schedule_napi = true; - /* Turn on polling */ - __napi_schedule(&pdata->napi); + if (rbu) { + schedule_napi = true; + pdata->ext_stats.rx_buffer_unavailable++; + } + + napi = per_ch_irq ? 
&channel->napi : &pdata->napi; + + if (schedule_napi && napi_schedule_prep(napi)) { + /* Disable interrupts appropriately before polling */ + if (per_ch_irq) { + if (pdata->channel_irq_mode) + xgbe_disable_rx_tx_int(pdata, channel); + else + disable_irq_nosync(channel->dma_irq); + } else { + xgbe_disable_rx_tx_ints(pdata); } + + /* Turn on polling */ + __napi_schedule(napi); } else { - /* Don't clear Rx/Tx status if doing per channel DMA - * interrupts, these will be cleared by the ISR for - * per channel DMA interrupts. + /* + * Don't clear Rx/Tx status if doing per-channel DMA + * interrupts; those bits will be serviced/cleared by + * the per-channel ISR/NAPI. In non-per-channel mode + * when we're not scheduling NAPI here, ensure we don't + * accidentally clear TI/RI in HW: zero them in the + * local copy so that the eventual write-back does not + * clear TI/RI. */ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); } - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) - pdata->ext_stats.rx_buffer_unavailable++; - /* Restart the device on a Fatal Bus Error */ - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) + if (fbe) schedule_work(&pdata->restart_work); /* Clear interrupt signals */ diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 6e4f171425192b..846d58c769eaf0 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -41,7 +41,7 @@ module_param(tx_flow, int, 0); module_param(rx_flow, int, 0); module_param(copy_thresh, int, 0); module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ -module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ +module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ @@ -262,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { - /* default Auto-Negotiation for fiber deivices */ + /* default Auto-Negotiation for fiber devices */ if (np->an_enable == 2) { np->an_enable = 1; } @@ -887,7 +887,7 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - /* Ttransmit Underrun */ + /* Transmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); @@ -1083,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status) get_stats (dev); } - /* PCI Error, a catastronphic error related to the bus interface + /* PCI Error, a catastrophic error related to the bus interface occurs, set GlobalReset and HostReset to reset. */ if (int_status & HostError) { printk (KERN_ERR "%s: HostError! 
IntStatus %4.4x.\n", diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 4788cc94639d1d..9ebf7a6db93e4f 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -270,7 +270,7 @@ enum _pcs_reg { PCS_ESR = 15, }; -/* IEEE Extened Status Register */ +/* IEEE Extended Status Register */ enum _mii_esr { MII_ESR_1000BX_FD = 0x8000, MII_ESR_1000BX_HD = 0x4000, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 00474ed11d53a3..baab4f1c908dd1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -711,6 +711,13 @@ static int dpaa2_eth_update_cls_rule(struct net_device *net_dev, return 0; } +static u32 dpaa2_eth_get_rx_ring_count(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + return dpaa2_eth_queue_count(priv); +} + static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -719,9 +726,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, int i, j = 0; switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = dpaa2_eth_queue_count(priv); - break; case ETHTOOL_GRXCLSRLCNT: rxnfc->rule_cnt = 0; rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv); @@ -949,6 +953,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_strings = dpaa2_eth_get_strings, .get_rxnfc = dpaa2_eth_get_rxnfc, .set_rxnfc = dpaa2_eth_set_rxnfc, + .get_rx_ring_count = dpaa2_eth_get_rx_ring_count, .get_rxfh_fields = dpaa2_eth_get_rxfh_fields, .set_rxfh_fields = dpaa2_eth_set_rxfh_fields, .get_ts_info = dpaa2_eth_get_ts_info, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 3e222321b937c0..fed89d4f1e1dc4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -633,6 +633,13 @@ done: return enetc_set_fs_entry(si, &rfse, fs->location); } +static u32 enetc_get_rx_ring_count(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + return priv->num_rx_rings; +} + static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -640,9 +647,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, int i, j; switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = priv->num_rx_rings; - break; case ETHTOOL_GRXCLSRLCNT: /* total number of entries */ rxnfc->data = priv->si->num_fs_entries; @@ -681,27 +685,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, return 0; } -/* i.MX95 ENETC does not support RFS table, but we can use ingress port - * filter table to implement Wake-on-LAN filter or drop the matched flow, - * so the implementation will be different from enetc_get_rxnfc() and - * enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for ENETC v4 PF. 
- */ -static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, - u32 *rule_locs) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - - switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = priv->num_rx_rings; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -1335,6 +1318,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = { .get_rmon_stats = enetc_get_rmon_stats, .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats, .get_eth_mac_stats = enetc_get_eth_mac_stats, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxnfc = enetc_get_rxnfc, .set_rxnfc = enetc_set_rxnfc, .get_rxfh_key_size = enetc_get_rxfh_key_size, @@ -1363,7 +1347,7 @@ const struct ethtool_ops enetc4_ppm_ethtool_ops = { ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats, - .get_rxnfc = enetc4_get_rxnfc, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxfh_key_size = enetc_get_rxfh_key_size, .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, @@ -1386,6 +1370,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = { .get_sset_count = enetc_get_sset_count, .get_strings = enetc_get_strings, .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxnfc = enetc_get_rxnfc, .set_rxnfc = enetc_set_rxnfc, .get_rxfh_indir_size = enetc_get_rxfh_indir_size, @@ -1413,7 +1398,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = { .set_wol = enetc_set_wol, .get_pauseparam = enetc_get_pauseparam, .set_pauseparam = enetc_set_pauseparam, - .get_rxnfc = enetc4_get_rxnfc, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxfh_key_size = enetc_get_rxfh_key_size, .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 5fd1f732768030..6fa752d3b60d5f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1431,6 +1431,13 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u32 gfar_get_rx_ring_count(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + return priv->num_rx_queues; +} + static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -1438,9 +1445,6 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int ret = 0; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = priv->num_rx_queues; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = priv->rx_list.count; break; @@ -1519,6 +1523,7 @@ const struct ethtool_ops gfar_ethtool_ops = { #endif .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, + .get_rx_ring_count = gfar_get_rx_ring_count, .set_rxfh_fields = gfar_set_rxfh_fields, .get_ts_info = gfar_get_ts_info, .get_link_ksettings = phy_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index cf8f14ce4cd50d..fddf7c207f8eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -31,14 +31,15 @@ */ #include <linux/device.h> #include <linux/netdevice.h> +#include <linux/units.h> #include "en.h" #include "en/port.h" #include 
"en/port_buffer.h" #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ -#define MLX5E_100MB (100000) -#define MLX5E_1GB (1000000) +#define MLX5E_100MB_TO_KB (100 * MEGA / KILO) +#define MLX5E_1GB_TO_KB (GIGA / KILO) #define MLX5E_CEE_STATE_UP 1 #define MLX5E_CEE_STATE_DOWN 0 @@ -572,10 +573,10 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, for (i = 0; i <= mlx5_max_tc(mdev); i++) { switch (max_bw_unit[i]) { case MLX5_100_MBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB; break; case MLX5_GBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB; break; case MLX5_BW_NO_LIMIT: break; @@ -595,8 +596,8 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; - __u64 upper_limit_mbps; - __u64 upper_limit_gbps; + u64 upper_limit_100mbps; + u64 upper_limit_gbps; int i; struct { int scale; @@ -614,22 +615,22 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, memset(max_bw_value, 0, sizeof(max_bw_value)); memset(max_bw_unit, 0, sizeof(max_bw_unit)); - upper_limit_mbps = 255 * MLX5E_100MB; - upper_limit_gbps = 255 * MLX5E_1GB; + upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB; + upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB; for (i = 0; i <= mlx5_max_tc(mdev); i++) { if (!maxrate->tc_maxrate[i]) { max_bw_unit[i] = MLX5_BW_NO_LIMIT; continue; } - if (maxrate->tc_maxrate[i] <= upper_limit_mbps) { + if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_100MB); + MLX5E_100MB_TO_KB); max_bw_value[i] = max_bw_value[i] ? 
max_bw_value[i] : 1; max_bw_unit[i] = MLX5_100_MBPS_UNIT; } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_1GB); + MLX5E_1GB_TO_KB); max_bw_unit[i] = MLX5_GBPS_UNIT; } else { netdev_err(netdev, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 8fd70b34807af0..efb4e412ec7e4d 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -15,6 +15,20 @@ struct dentry *mana_debugfs_root; +struct mana_dev_recovery { + struct list_head list; + struct pci_dev *pdev; + enum gdma_eqe_type type; +}; + +static struct mana_dev_recovery_work { + struct list_head dev_list; + struct delayed_work work; + + /* Lock for dev_list above */ + spinlock_t lock; +} mana_dev_recovery_work; + static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { return readl(g->bar0_va + offset); @@ -387,6 +401,25 @@ EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA"); #define MANA_SERVICE_PERIOD 10 +static void mana_serv_rescan(struct pci_dev *pdev) +{ + struct pci_bus *parent; + + pci_lock_rescan_remove(); + + parent = pdev->bus; + if (!parent) { + dev_err(&pdev->dev, "MANA service: no parent bus\n"); + goto out; + } + + pci_stop_and_remove_bus_device(pdev); + pci_rescan_bus(parent); + +out: + pci_unlock_rescan_remove(); +} + static void mana_serv_fpga(struct pci_dev *pdev) { struct pci_bus *bus, *parent; @@ -419,9 +452,12 @@ static void mana_serv_reset(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); struct hw_channel_context *hwc; + int ret; if (!gc) { - dev_err(&pdev->dev, "MANA service: no GC\n"); + /* Perform PCI rescan on device if GC is not set up */ + dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n"); + mana_serv_rescan(pdev); return; } @@ -440,9 +476,18 @@ static void mana_serv_reset(struct pci_dev *pdev) msleep(MANA_SERVICE_PERIOD * 1000); - mana_gd_resume(pdev); + ret = mana_gd_resume(pdev); + if (ret == -ETIMEDOUT || ret == -EPROTO) { + /* Perform PCI rescan on device if we failed on HWC */ + dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n"); + mana_serv_rescan(pdev); + goto out; + } - dev_info(&pdev->dev, "MANA reset cycle completed\n"); + if (ret) + dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret); + else + dev_info(&pdev->dev, "MANA reset cycle completed\n"); out: gc->in_service = false; @@ -454,18 +499,9 @@ struct mana_serv_work { enum gdma_eqe_type type; }; -static void mana_serv_func(struct work_struct *w) +static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev) { - struct mana_serv_work *mns_wk; - struct pci_dev *pdev; - - mns_wk = container_of(w, struct mana_serv_work, serv_work); - pdev = mns_wk->pdev; - - if (!pdev) - goto out; - - switch (mns_wk->type) { + switch (type) { case GDMA_EQE_HWC_FPGA_RECONFIG: mana_serv_fpga(pdev); break; @@ -475,12 +511,48 @@ static void mana_serv_func(struct work_struct *w) break; default: - dev_err(&pdev->dev, "MANA service: unknown type %d\n", - mns_wk->type); + dev_err(&pdev->dev, "MANA service: unknown type %d\n", type); break; } +} + +static void mana_recovery_delayed_func(struct work_struct *w) +{ + struct mana_dev_recovery_work *work; + struct mana_dev_recovery *dev; + unsigned long flags; + + work = container_of(w, struct mana_dev_recovery_work, work.work); + + spin_lock_irqsave(&work->lock, flags); + + while (!list_empty(&work->dev_list)) { + dev = list_first_entry(&work->dev_list, + struct 
mana_dev_recovery, list); + list_del(&dev->list); + spin_unlock_irqrestore(&work->lock, flags); + + mana_do_service(dev->type, dev->pdev); + pci_dev_put(dev->pdev); + kfree(dev); + + spin_lock_irqsave(&work->lock, flags); + } + + spin_unlock_irqrestore(&work->lock, flags); +} + +static void mana_serv_func(struct work_struct *w) +{ + struct mana_serv_work *mns_wk; + struct pci_dev *pdev; + + mns_wk = container_of(w, struct mana_serv_work, serv_work); + pdev = mns_wk->pdev; + + if (pdev) + mana_do_service(mns_wk->type, pdev); -out: pci_dev_put(pdev); kfree(mns_wk); module_put(THIS_MODULE); @@ -541,6 +613,17 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_RESET_REQUEST: dev_info(gc->dev, "Recv MANA service type:%d\n", type); + if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + /* + * Device is in probe and we received a hardware reset + * event, the probe function will detect that the flag + * has changed and perform service procedure. + */ + dev_info(gc->dev, + "Service is to be processed in probe\n"); + break; + } + if (gc->in_service) { dev_info(gc->dev, "Already in service\n"); break; @@ -1938,8 +2021,19 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto cleanup_mana; + /* + * If a hardware reset event has occurred over HWC during probe, + * rollback and perform hardware reset procedure. + */ + if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + err = -EPROTO; + goto cleanup_mana_rdma; + } + return 0; +cleanup_mana_rdma: + mana_rdma_remove(&gc->mana_ib); cleanup_mana: mana_remove(&gc->mana, false); cleanup_gd: @@ -1963,6 +2057,35 @@ release_region: disable_dev: pci_disable_device(pdev); dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err); + + /* + * Hardware could be in recovery mode and the HWC returns TIMEDOUT or + * EPROTO from mana_gd_setup(), mana_probe() or mana_rdma_probe(), or + * we received a hardware reset event over HWC interrupt. In this case, + * perform the device recovery procedure after MANA_SERVICE_PERIOD + * seconds. 
+ */ + if (err == -ETIMEDOUT || err == -EPROTO) { + struct mana_dev_recovery *dev; + unsigned long flags; + + dev_info(&pdev->dev, "Start MANA recovery mode\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return err; + + dev->pdev = pci_dev_get(pdev); + dev->type = GDMA_EQE_HWC_RESET_REQUEST; + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list); + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + + schedule_delayed_work(&mana_dev_recovery_work.work, + secs_to_jiffies(MANA_SERVICE_PERIOD)); + } + return err; } @@ -2067,6 +2190,10 @@ static int __init mana_driver_init(void) { int err; + INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list); + spin_lock_init(&mana_dev_recovery_work.lock); + INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func); + mana_debugfs_root = debugfs_create_dir("mana", NULL); err = pci_register_driver(&mana_driver); @@ -2080,6 +2207,21 @@ static int __init mana_driver_init(void) static void __exit mana_driver_exit(void) { + struct mana_dev_recovery *dev; + unsigned long flags; + + disable_delayed_work_sync(&mana_dev_recovery_work.work); + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + while (!list_empty(&mana_dev_recovery_work.dev_list)) { + dev = list_first_entry(&mana_dev_recovery_work.dev_list, + struct mana_dev_recovery, list); + list_del(&dev->list); + pci_dev_put(dev->pdev); + kfree(dev); + } + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + pci_unregister_driver(&mana_driver); debugfs_remove(mana_debugfs_root); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 6303196042118e..405e91eb3141f2 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1512,7 +1512,6 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_51 ... 
RTL_GIGA_MAC_VER_52: return RTL_DASH_EP; case RTL_GIGA_MAC_VER_66: - case RTL_GIGA_MAC_VER_80: return RTL_DASH_25_BP; default: return RTL_DASH_NONE; diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index 677f92883976ca..73e1364ad1ed7b 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -40,12 +40,12 @@ static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs, { switch (interface) { case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_2500BASEX: return LINK_INBAND_DISABLE; case PHY_INTERFACE_MODE_USXGMII: @@ -80,27 +80,6 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, phylink_decode_usxgmii_word(state, lpa); } -static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs, - struct phylink_link_state *state) -{ - int bmsr; - - bmsr = mdiodev_read(pcs, MII_BMSR); - if (bmsr < 0) { - state->link = false; - return; - } - - state->link = !!(bmsr & BMSR_LSTATUS); - state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); - if (!state->link) - return; - - state->speed = SPEED_2500; - state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX; - state->duplex = DUPLEX_FULL; -} - static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { @@ -108,13 +87,11 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, switch (state->interface) { case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state); break; - case PHY_INTERFACE_MODE_2500BASEX: - lynx_pcs_get_state_2500basex(lynx->mdio, state); - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: lynx_pcs_get_state_usxgmii(lynx->mdio, state); @@ -152,7 +129,8 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs, mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16); } - if (interface == PHY_INTERFACE_MODE_1000BASEX) { + if (interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { if_mode = 0; } else { /* SGMII and QSGMII */ @@ -202,15 +180,9 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_2500BASEX: return lynx_pcs_config_giga(lynx->mdio, ifmode, advertising, neg_mode); - case PHY_INTERFACE_MODE_2500BASEX: - if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { - dev_err(&lynx->mdio->dev, - "AN not supported on 3.125GHz SerDes lane\n"); - return -EOPNOTSUPP; - } - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: return lynx_pcs_config_usxgmii(lynx->mdio, ifmode, advertising, @@ -271,42 +243,6 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, if_mode); } -/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane - * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have - * auto-negotiation of any link parameters. Electrically it is compatible with - * a single lane of XAUI. - * The hardware reference manual wants to call this mode SGMII, but it isn't - * really, since the fundamental features of SGMII: - * - Downgrading the link speed by duplicating symbols - * - Auto-negotiation - * are not there. 
- * The speed is configured at 1000 in the IF_MODE because the clock frequency - * is actually given by a PLL configured in the Reset Configuration Word (RCW). - * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o - * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a - * lower link speed on line side, the system-side interface remains fixed at - * 2500 Mbps and we do rate adaptation through pause frames. - */ -static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs, - unsigned int neg_mode, - int speed, int duplex) -{ - u16 if_mode = 0; - - if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { - dev_err(&pcs->dev, "AN not supported for 2500BaseX\n"); - return; - } - - if (duplex == DUPLEX_HALF) - if_mode |= IF_MODE_HALF_DUPLEX; - if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500); - - mdiodev_modify(pcs, IF_MODE, - IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK, - if_mode); -} - static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex) @@ -318,9 +254,6 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, case PHY_INTERFACE_MODE_QSGMII: lynx_pcs_link_up_sgmii(lynx->mdio, neg_mode, speed, duplex); break; - case PHY_INTERFACE_MODE_2500BASEX: - lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex); - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: /* At the moment, only in-band AN is supported for USXGMII diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h index 6850a3f0b31e20..449d0fb6709340 100644 --- a/drivers/net/phy/mdio-open-alliance.h +++ b/drivers/net/phy/mdio-open-alliance.h @@ -56,6 +56,8 @@ /* Advanced Diagnostic Features Capability Register*/ #define MDIO_OATC14_ADFCAP 0xcc00 #define OATC14_ADFCAP_HDD_CAPABILITY GENMASK(10, 8) +#define OATC14_ADFCAP_SQIPLUS_CAPABILITY GENMASK(4, 1) +#define OATC14_ADFCAP_SQI_CAPABILITY BIT(0) /* Harness Defect Detection Register */ #define MDIO_OATC14_HDD 0xcc01 @@ -65,6 +67,17 @@ #define OATC14_HDD_VALID BIT(2) #define OATC14_HDD_SHORT_OPEN_STATUS GENMASK(1, 0) +/* Dynamic Channel Quality SQI Register */ +#define MDIO_OATC14_DCQ_SQI 0xcc03 +#define OATC14_DCQ_SQI_VALUE GENMASK(2, 0) + +/* Dynamic Channel Quality SQI Plus Register */ +#define MDIO_OATC14_DCQ_SQIPLUS 0xcc04 +#define OATC14_DCQ_SQIPLUS_VALUE GENMASK(7, 0) + +/* SQI is supported using 3 bits means 8 levels (0-7) */ +#define OATC14_SQI_MAX_LEVEL 7 + /* Bus Short/Open Status: * 0 0 - no fault; everything is ok. 
(Default) * 0 1 - detected as an open or missing termination(s) diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c index 5a0a667789770c..e601d56b2507f9 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -575,6 +575,8 @@ static struct phy_driver microchip_t1s_driver[] = { .get_plca_status = genphy_c45_plca_get_status, .cable_test_start = genphy_c45_oatc14_cable_test_start, .cable_test_get_status = genphy_c45_oatc14_cable_test_get_status, + .get_sqi = genphy_c45_oatc14_get_sqi, + .get_sqi_max = genphy_c45_oatc14_get_sqi_max, }, { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB), diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index f5e23b53994f5b..d48aa7231b3705 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1695,3 +1695,140 @@ int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev) OATC14_HDD_START_CONTROL); } EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_start); + +/** + * oatc14_update_sqi_capability - Read and update OATC14 10Base-T1S PHY SQI/SQI+ + * capability + * @phydev: Pointer to the PHY device structure + * + * This helper reads the OATC14 ADFCAP capability register to determine whether + * the PHY supports SQI or SQI+ reporting. + * + * SQI+ capability is detected first. The SQI+ field indicates the number of + * valid MSBs (3–8), corresponding to 8–256 SQI+ levels. When present, the + * function stores the number of SQI+ bits and computes the maximum SQI+ value + * as (2^bits - 1). + * + * If SQI+ is not supported, the function checks for basic SQI capability, + * which provides 0–7 SQI levels. + * + * On success, the capability information is stored in + * @phydev->oatc14_sqi_capability and marked as updated. + * + * Return: + * * 0 - capability successfully read and stored + * * -EOPNOTSUPP - SQI/SQI+ not supported by this PHY + * * Negative errno on read failure + */ +static int oatc14_update_sqi_capability(struct phy_device *phydev) +{ + u8 bits; + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_ADFCAP); + if (ret < 0) + return ret; + + /* Check for SQI+ capability + * 0 - SQI+ is not supported + * (3-8) bits for (8-256) SQI+ levels supported + */ + bits = FIELD_GET(OATC14_ADFCAP_SQIPLUS_CAPABILITY, ret); + if (bits) { + phydev->oatc14_sqi_capability.sqiplus_bits = bits; + /* Max sqi+ level supported: (2 ^ bits) - 1 */ + phydev->oatc14_sqi_capability.sqi_max = BIT(bits) - 1; + goto update_done; + } + + /* Check for SQI capability + * 0 - SQI is not supported + * 1 - SQI is supported (0-7 levels) + */ + if (ret & OATC14_ADFCAP_SQI_CAPABILITY) { + phydev->oatc14_sqi_capability.sqi_max = OATC14_SQI_MAX_LEVEL; + goto update_done; + } + + return -EOPNOTSUPP; + +update_done: + phydev->oatc14_sqi_capability.updated = true; + return 0; +} + +/** + * genphy_c45_oatc14_get_sqi_max - Get maximum supported SQI or SQI+ level of + * OATC14 10Base-T1S PHY + * @phydev: pointer to the PHY device structure + * + * This function returns the maximum supported Signal Quality Indicator (SQI) or + * SQI+ level. The SQI capability is updated on first invocation if it has not + * already been updated. 
+ * + * Return: + * * Maximum SQI/SQI+ level supported + * * Negative errno on capability read failure + */ +int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev) +{ + int ret; + + if (!phydev->oatc14_sqi_capability.updated) { + ret = oatc14_update_sqi_capability(phydev); + if (ret) + return ret; + } + + return phydev->oatc14_sqi_capability.sqi_max; +} +EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi_max); + +/** + * genphy_c45_oatc14_get_sqi - Get Signal Quality Indicator (SQI) from an OATC14 + * 10Base-T1S PHY + * @phydev: pointer to the PHY device structure + * + * This function reads the SQI+ or SQI value from an OATC14-compatible + * 10Base-T1S PHY. If SQI+ capability is supported, the function returns the + * extended SQI+ value; otherwise, it returns the basic SQI value. The SQI + * capability is updated on first invocation if it has not already been updated. + * + * Return: + * * SQI/SQI+ value on success + * * Negative errno on read failure + */ +int genphy_c45_oatc14_get_sqi(struct phy_device *phydev) +{ + u8 shift; + int ret; + + if (!phydev->oatc14_sqi_capability.updated) { + ret = oatc14_update_sqi_capability(phydev); + if (ret) + return ret; + } + + /* Calculate and return SQI+ value if supported */ + if (phydev->oatc14_sqi_capability.sqiplus_bits) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_DCQ_SQIPLUS); + if (ret < 0) + return ret; + + /* SQI+ uses N MSBs out of 8 bits, left-aligned with padding 1's + * Calculate the right-shift needed to isolate the N bits. + */ + shift = 8 - phydev->oatc14_sqi_capability.sqiplus_bits; + + return (ret & OATC14_DCQ_SQIPLUS_VALUE) >> shift; + } + + /* Read and return SQI value if SQI+ capability is not supported */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_DCQ_SQI); + if (ret < 0) + return ret; + + return ret & OATC14_DCQ_SQI_VALUE; +} +EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi); diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index 0a41d2b45d8c8c..4d5c9ae8f22190 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1231,7 +1231,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, if (err) { if (dev->flags & IFF_PROMISC) dev_set_promiscuity(port_dev, -1); - goto err_set_slave_promisc; + goto err_set_slave_allmulti; } } @@ -1258,6 +1258,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return 0; err_set_dev_type: +err_set_slave_allmulti: err_set_slave_promisc: __team_option_inst_del_port(team, port); diff --git a/include/linux/phy.h b/include/linux/phy.h index 059a104223c4ca..fbbe028cc4b7b1 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -531,6 +531,30 @@ struct macsec_context; struct macsec_ops; /** + * struct phy_oatc14_sqi_capability - SQI capability information for OATC14 + * 10Base-T1S PHY + * @updated: Indicates whether the SQI capability fields have been updated. + * @sqi_max: Maximum supported Signal Quality Indicator (SQI) level reported by + * the PHY. + * @sqiplus_bits: Bits for SQI+ levels supported by the PHY. 
+ * 0 - SQI+ is not supported + * 3 - SQI+ is supported, using 3 bits (8 levels) + * 4 - SQI+ is supported, using 4 bits (16 levels) + * 5 - SQI+ is supported, using 5 bits (32 levels) + * 6 - SQI+ is supported, using 6 bits (64 levels) + * 7 - SQI+ is supported, using 7 bits (128 levels) + * 8 - SQI+ is supported, using 8 bits (256 levels) + * + * This structure is used by the OATC14 10Base-T1S PHY driver to store the SQI + * and SQI+ capability information retrieved from the PHY. + */ +struct phy_oatc14_sqi_capability { + bool updated; + int sqi_max; + u8 sqiplus_bits; +}; + +/** * struct phy_device - An instance of a PHY * * @mdio: MDIO bus this PHY is on @@ -626,6 +650,7 @@ struct macsec_ops; * @link_down_events: Number of times link was lost * @shared: Pointer to private data shared by phys in one package * @priv: Pointer to driver private data + * @oatc14_sqi_capability: SQI capability information for OATC14 10Base-T1S PHY * * interrupts currently only supports enabled or disabled, * but could be changed in the future to support enabling @@ -772,6 +797,8 @@ struct phy_device { /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif + + struct phy_oatc14_sqi_capability oatc14_sqi_capability; }; /* Generic phy_device::dev_flags */ @@ -2257,6 +2284,8 @@ int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev, bool *finished); +int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev); +int genphy_c45_oatc14_get_sqi(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index a4cf307859f857..eaa27483f99b2b 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -382,6 +382,10 @@ struct gdma_irq_context { char name[MANA_IRQ_NAME_SZ]; }; +enum gdma_context_flags { + GC_PROBE_SUCCEEDED = 0, +}; + struct gdma_context { struct device *dev; struct dentry *mana_pci_debugfs; @@ -430,6 +434,8 @@ struct gdma_context { u64 pf_cap_flags1; struct workqueue_struct *service_wq; + + unsigned long flags; }; static inline bool mana_gd_is_mana(struct gdma_dev *gd) @@ -600,6 +606,9 @@ enum { /* Driver can send HWC periodically to query stats */ #define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21) +/* Driver can handle hardware recovery events during probe */ +#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22) + #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ @@ -611,7 +620,8 @@ enum { GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \ GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \ GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \ - GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE) + GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \ + GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY) #define GDMA_DRV_CAP_FLAGS2 0 diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 2d0c8275a3a82c..5cfaab7d089095 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -163,7 +163,7 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count, tunnel->sock ? 
 		   refcount_read(&tunnel->sock->sk_refcnt) : 0,
 		   refcount_read(&tunnel->ref_count));
-	seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
+	seq_printf(m, " %08x tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   0,
 		   atomic_long_read(&tunnel->stats.tx_packets),
 		   atomic_long_read(&tunnel->stats.tx_bytes),
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index be9149ac79dd28..75ea96c10e497e 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -20,7 +20,6 @@ struct mctp_frag_test {
 static void mctp_test_fragment(struct kunit *test)
 {
 	const struct mctp_frag_test *params;
-	struct mctp_test_pktqueue tpq;
 	int rc, i, n, mtu, msgsize;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
@@ -43,13 +42,12 @@ static void mctp_test_fragment(struct kunit *test)
 	dev = mctp_test_create_dev();
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
 
-	mctp_test_dst_setup(test, &dst, dev, &tpq, mtu);
+	mctp_test_dst_setup(test, &dst, dev, mtu);
 
 	rc = mctp_do_fragment_route(&dst, skb, mtu, MCTP_TAG_OWNER);
 	KUNIT_EXPECT_FALSE(test, rc);
 
-	n = tpq.pkts.qlen;
-
+	n = dev->pkts.qlen;
 	KUNIT_EXPECT_EQ(test, n, params->n_frags);
 
 	for (i = 0;; i++) {
@@ -61,8 +59,7 @@ static void mctp_test_fragment(struct kunit *test)
 
 		first = i == 0;
 		last = i == (n - 1);
 
-		skb2 = skb_dequeue(&tpq.pkts);
-
+		skb2 = skb_dequeue(&dev->pkts);
 		if (!skb2)
 			break;
@@ -99,7 +96,7 @@ static void mctp_test_fragment(struct kunit *test)
 		kfree_skb(skb2);
 	}
 
-	mctp_test_dst_release(&dst, &tpq);
+	mctp_dst_release(&dst);
 	mctp_test_destroy_dev(dev);
 }
 
@@ -130,13 +127,11 @@ struct mctp_rx_input_test {
 static void mctp_test_rx_input(struct kunit *test)
 {
 	const struct mctp_rx_input_test *params;
-	struct mctp_test_pktqueue tpq;
 	struct mctp_test_route *rt;
 	struct mctp_test_dev *dev;
 	struct sk_buff *skb;
 
 	params = test->param_value;
-	test->priv = &tpq;
 
 	dev = mctp_test_create_dev();
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
@@ -147,13 +142,10 @@ static void mctp_test_rx_input(struct kunit *test)
 	skb = mctp_test_create_skb(&params->hdr, 1);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
 
-	mctp_test_pktqueue_init(&tpq);
-
 	mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
 
-	KUNIT_EXPECT_EQ(test, !!tpq.pkts.qlen, params->input);
+	KUNIT_EXPECT_EQ(test, !!dev->pkts.qlen, params->input);
 
-	skb_queue_purge(&tpq.pkts);
 	mctp_test_route_destroy(test, rt);
 	mctp_test_destroy_dev(dev);
 }
@@ -182,7 +174,6 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
 static void __mctp_route_test_init(struct kunit *test,
 				   struct mctp_test_dev **devp,
 				   struct mctp_dst *dst,
-				   struct mctp_test_pktqueue *tpq,
 				   struct socket **sockp,
 				   unsigned int netid)
 {
@@ -196,7 +187,7 @@ static void __mctp_route_test_init(struct kunit *test,
 	if (netid != MCTP_NET_ANY)
 		WRITE_ONCE(dev->mdev->net, netid);
 
-	mctp_test_dst_setup(test, dst, dev, tpq, 68);
+	mctp_test_dst_setup(test, dst, dev, 68);
 
 	rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
 	KUNIT_ASSERT_EQ(test, rc, 0);
@@ -215,11 +206,10 @@ static void __mctp_route_test_fini(struct kunit *test,
 				   struct mctp_test_dev *dev,
 				   struct mctp_dst *dst,
-				   struct mctp_test_pktqueue *tpq,
 				   struct socket *sock)
 {
 	sock_release(sock);
-	mctp_test_dst_release(dst, tpq);
+	mctp_dst_release(dst);
 	mctp_test_destroy_dev(dev);
 }
 
@@ -232,7 +222,6 @@ struct mctp_route_input_sk_test {
 static void mctp_test_route_input_sk(struct kunit *test)
 {
 	const struct mctp_route_input_sk_test *params;
-	struct mctp_test_pktqueue tpq;
 	struct sk_buff *skb, *skb2;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
@@ -241,13 +230,12 @@ static void mctp_test_route_input_sk(struct kunit *test)
 
 	params = test->param_value;
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	skb = mctp_test_create_skb_data(&params->hdr, &params->type);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
 
 	mctp_test_skb_set_dev(skb, dev);
-	mctp_test_pktqueue_init(&tpq);
 
 	rc = mctp_dst_input(&dst, skb);
 
@@ -266,7 +254,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
 		KUNIT_EXPECT_NULL(test, skb2);
 	}
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 #define FL_S	(MCTP_HDR_FLAG_SOM)
@@ -303,7 +291,6 @@ struct mctp_route_input_sk_reasm_test {
 static void mctp_test_route_input_sk_reasm(struct kunit *test)
 {
 	const struct mctp_route_input_sk_reasm_test *params;
-	struct mctp_test_pktqueue tpq;
 	struct sk_buff *skb, *skb2;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
@@ -313,7 +300,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
 
 	params = test->param_value;
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	for (i = 0; i < params->n_hdrs; i++) {
 		c = i;
@@ -336,7 +323,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
 		KUNIT_EXPECT_NULL(test, skb2);
 	}
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 #define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_TO | (f) | ((s) << MCTP_HDR_SEQ_SHIFT))
@@ -438,7 +425,6 @@ struct mctp_route_input_sk_keys_test {
 static void mctp_test_route_input_sk_keys(struct kunit *test)
 {
 	const struct mctp_route_input_sk_keys_test *params;
-	struct mctp_test_pktqueue tpq;
 	struct sk_buff *skb, *skb2;
 	struct mctp_test_dev *dev;
 	struct mctp_sk_key *key;
@@ -457,7 +443,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
 	net = READ_ONCE(dev->mdev->net);
 
-	mctp_test_dst_setup(test, &dst, dev, &tpq, 68);
+	mctp_test_dst_setup(test, &dst, dev, 68);
 
 	rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
 	KUNIT_ASSERT_EQ(test, rc, 0);
@@ -497,7 +483,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
 	skb_free_datagram(sock->sk, skb2);
 	mctp_key_unref(key);
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 static const struct mctp_route_input_sk_keys_test mctp_route_input_sk_keys_tests[] = {
@@ -572,7 +558,6 @@ KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
 struct test_net {
 	unsigned int netid;
 	struct mctp_test_dev *dev;
-	struct mctp_test_pktqueue tpq;
 	struct mctp_dst dst;
 	struct socket *sock;
 	struct sk_buff *skb;
@@ -591,20 +576,18 @@ mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
 	t->msg.data = t->netid;
 
-	__mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock,
-			       t->netid);
+	__mctp_route_test_init(test, &t->dev, &t->dst, &t->sock, t->netid);
 
 	t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
 
 	mctp_test_skb_set_dev(t->skb, t->dev);
-	mctp_test_pktqueue_init(&t->tpq);
 }
 
 static void
 mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
 					      struct test_net *t)
 {
-	__mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock);
+	__mctp_route_test_fini(test, t->dev, &t->dst, t->sock);
 }
 
 /* Test that skbs from different nets (otherwise identical) get routed to their
@@ -661,8 +644,7 @@ mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
 
 	t->msg.data = t->netid;
 
-	__mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock,
-			       t->netid);
+	__mctp_route_test_init(test, &t->dev, &t->dst, &t->sock, t->netid);
 
 	msk = container_of(t->sock->sk, struct mctp_sock, sk);
 
@@ -685,7 +667,7 @@ mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
 					     struct test_net *t)
 {
 	mctp_key_unref(t->key);
-	__mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock);
+	__mctp_route_test_fini(test, t->dev, &t->dst, t->sock);
 }
 
 /* test that skbs from different nets (otherwise identical) get routed to their
@@ -738,14 +720,13 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
 static void mctp_test_route_input_sk_fail_single(struct kunit *test)
 {
 	const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
-	struct mctp_test_pktqueue tpq;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
 	struct socket *sock;
 	struct sk_buff *skb;
 	int rc;
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	/* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
 	 * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
@@ -768,7 +749,7 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
 	kfree_skb(skb);
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 /* Input route to socket, using a fragmented message, where sock delivery fails.
@@ -776,7 +757,6 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test)
 static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
 {
 	const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
-	struct mctp_test_pktqueue tpq;
 	struct mctp_test_dev *dev;
 	struct sk_buff *skbs[2];
 	struct mctp_dst dst;
@@ -784,7 +764,7 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
 	unsigned int i;
 	int rc;
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	lock_sock(sock->sk);
 	WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
@@ -815,7 +795,7 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
 	kfree_skb(skbs[1]);
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 /* Input route to socket, using a fragmented message created from clones.
@@ -833,7 +813,6 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
 	const size_t data_len = 3; /* arbitrary */
 	u8 compare[3 * ARRAY_SIZE(hdrs)];
 	u8 flat[3 * ARRAY_SIZE(hdrs)];
-	struct mctp_test_pktqueue tpq;
 	struct mctp_test_dev *dev;
 	struct sk_buff *skb[5];
 	struct sk_buff *rx_skb;
@@ -845,7 +824,7 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
 
 	total = data_len + sizeof(struct mctp_hdr);
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	/* Create a single skb initially with concatenated packets */
 	skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total);
@@ -922,7 +901,7 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
 		kfree_skb(skb[i]);
 	}
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 #if IS_ENABLED(CONFIG_MCTP_FLOWS)
@@ -930,7 +909,6 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
 static void mctp_test_flow_init(struct kunit *test,
 				struct mctp_test_dev **devp,
 				struct mctp_dst *dst,
-				struct mctp_test_pktqueue *tpq,
 				struct socket **sock,
 				struct sk_buff **skbp,
 				unsigned int len)
@@ -944,7 +922,7 @@ static void mctp_test_flow_init(struct kunit *test,
 	 * mctp_local_output, which will call dst->output on whatever
 	 * route we provide
 	 */
-	__mctp_route_test_init(test, &dev, dst, tpq, sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, dst, sock, MCTP_NET_ANY);
 
 	/* Assign a single EID. ->addrs is freed on mctp netdev release */
 	dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
@@ -965,16 +943,14 @@ static void mctp_test_flow_init(struct kunit *test,
 static void mctp_test_flow_fini(struct kunit *test,
 				struct mctp_test_dev *dev,
 				struct mctp_dst *dst,
-				struct mctp_test_pktqueue *tpq,
 				struct socket *sock)
 {
-	__mctp_route_test_fini(test, dev, dst, tpq, sock);
+	__mctp_route_test_fini(test, dev, dst, sock);
 }
 
 /* test that an outgoing skb has the correct MCTP extension data set */
 static void mctp_test_packet_flow(struct kunit *test)
 {
-	struct mctp_test_pktqueue tpq;
 	struct sk_buff *skb, *skb2;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
@@ -983,15 +959,15 @@ static void mctp_test_packet_flow(struct kunit *test)
 	u8 dst_eid = 8;
 	int n, rc;
 
-	mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 30);
+	mctp_test_flow_init(test, &dev, &dst, &sock, &skb, 30);
 
 	rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER);
 	KUNIT_ASSERT_EQ(test, rc, 0);
 
-	n = tpq.pkts.qlen;
+	n = dev->pkts.qlen;
 	KUNIT_ASSERT_EQ(test, n, 1);
 
-	skb2 = skb_dequeue(&tpq.pkts);
+	skb2 = skb_dequeue(&dev->pkts);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
 
 	flow = skb_ext_find(skb2, SKB_EXT_MCTP);
@@ -1000,7 +976,7 @@ static void mctp_test_packet_flow(struct kunit *test)
 	KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
 
 	kfree_skb(skb2);
-	mctp_test_flow_fini(test, dev, &dst, &tpq, sock);
+	mctp_test_flow_fini(test, dev, &dst, sock);
 }
 
 /* test that outgoing skbs, after fragmentation, all have the correct MCTP
@@ -1008,7 +984,6 @@ static void mctp_test_packet_flow(struct kunit *test)
  */
 static void mctp_test_fragment_flow(struct kunit *test)
 {
-	struct mctp_test_pktqueue tpq;
 	struct mctp_flow *flows[2];
 	struct sk_buff *tx_skbs[2];
 	struct mctp_test_dev *dev;
@@ -1018,17 +993,17 @@ static void mctp_test_fragment_flow(struct kunit *test)
 	u8 dst_eid = 8;
 	int n, rc;
 
-	mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 100);
+	mctp_test_flow_init(test, &dev, &dst, &sock, &skb, 100);
 
 	rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER);
 	KUNIT_ASSERT_EQ(test, rc, 0);
 
-	n = tpq.pkts.qlen;
+	n = dev->pkts.qlen;
 	KUNIT_ASSERT_EQ(test, n, 2);
 
 	/* both resulting packets should have the same flow data */
-	tx_skbs[0] = skb_dequeue(&tpq.pkts);
-	tx_skbs[1] = skb_dequeue(&tpq.pkts);
+	tx_skbs[0] = skb_dequeue(&dev->pkts);
+	tx_skbs[1] = skb_dequeue(&dev->pkts);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
@@ -1044,7 +1019,7 @@ static void mctp_test_fragment_flow(struct kunit *test)
 	kfree_skb(tx_skbs[0]);
 	kfree_skb(tx_skbs[1]);
 
-	mctp_test_flow_fini(test, dev, &dst, &tpq, sock);
+	mctp_test_flow_fini(test, dev, &dst, sock);
 }
 
 #else
@@ -1063,7 +1038,6 @@ static void mctp_test_fragment_flow(struct kunit *test)
 static void mctp_test_route_output_key_create(struct kunit *test)
 {
 	const u8 dst_eid = 26, src_eid = 15;
-	struct mctp_test_pktqueue tpq;
 	const unsigned int netid = 50;
 	struct mctp_test_dev *dev;
 	struct mctp_sk_key *key;
@@ -1080,7 +1054,7 @@ static void mctp_test_route_output_key_create(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
 	WRITE_ONCE(dev->mdev->net, netid);
 
-	mctp_test_dst_setup(test, &dst, dev, &tpq, 68);
+	mctp_test_dst_setup(test, &dst, dev, 68);
 
 	rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
 	KUNIT_ASSERT_EQ(test, rc, 0);
@@ -1127,14 +1101,13 @@ static void mctp_test_route_output_key_create(struct kunit *test)
 	KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
 
 	sock_release(sock);
-	mctp_test_dst_release(&dst, &tpq);
+	mctp_dst_release(&dst);
 	mctp_test_destroy_dev(dev);
 }
 
 static void mctp_test_route_extaddr_input(struct kunit *test)
 {
 	static const unsigned char haddr[] = { 0xaa, 0x55 };
-	struct mctp_test_pktqueue tpq;
 	struct mctp_skb_cb *cb, *cb2;
 	const unsigned int len = 40;
 	struct mctp_test_dev *dev;
@@ -1149,7 +1122,7 @@ static void mctp_test_route_extaddr_input(struct kunit *test)
 	hdr.dest = 8;
 	hdr.flags_seq_tag = FL_S | FL_E | FL_TO;
 
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+	__mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY);
 
 	skb = mctp_test_create_skb(&hdr, len);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
@@ -1178,7 +1151,7 @@ static void mctp_test_route_extaddr_input(struct kunit *test)
 	KUNIT_EXPECT_MEMEQ(test, cb2->haddr, haddr, sizeof(haddr));
 
 	kfree_skb(skb2);
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+	__mctp_route_test_fini(test, dev, &dst, sock);
 }
 
 static void mctp_test_route_gw_lookup(struct kunit *test)
@@ -1530,14 +1503,13 @@ static void mctp_test_bind_lookup(struct kunit *test)
 	struct socket *socks[ARRAY_SIZE(lookup_binds)];
 	struct sk_buff *skb_pkt = NULL, *skb_sock = NULL;
 	struct socket *sock_ty0, *sock_expect = NULL;
-	struct mctp_test_pktqueue tpq;
 	struct mctp_test_dev *dev;
 	struct mctp_dst dst;
 	int rc;
 
 	rx = test->param_value;
-	__mctp_route_test_init(test, &dev, &dst, &tpq, &sock_ty0, rx->net);
+	__mctp_route_test_init(test, &dev, &dst, &sock_ty0, rx->net);
 
 	/* Create all binds */
 	for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) {
 		mctp_test_bind_run(test, &lookup_binds[i],
@@ -1557,7 +1529,6 @@ static void mctp_test_bind_lookup(struct kunit *test)
 	skb_pkt = mctp_test_create_skb_data(&rx->hdr, &rx->ty);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb_pkt);
 	mctp_test_skb_set_dev(skb_pkt, dev);
-	mctp_test_pktqueue_init(&tpq);
 
 	rc = mctp_dst_input(&dst, skb_pkt);
 	if (rx->expect) {
@@ -1591,7 +1562,7 @@ cleanup:
 	for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++)
 		sock_release(socks[i]);
 
-	__mctp_route_test_fini(test, dev, &dst, &tpq, sock_ty0);
+	__mctp_route_test_fini(test, dev, &dst, sock_ty0);
 }
 
 static struct kunit_case mctp_test_cases[] = {
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index 35f6be8145674b..37f1ba62a2ab0f 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -13,7 +13,10 @@
 static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
 				    struct net_device *ndev)
 {
-	kfree_skb(skb);
+	struct mctp_test_dev *dev = netdev_priv(ndev);
+
+	skb_queue_tail(&dev->pkts, skb);
+
 	return NETDEV_TX_OK;
 }
 
@@ -26,7 +29,7 @@ static void mctp_test_dev_setup(struct net_device *ndev)
 	ndev->type = ARPHRD_MCTP;
 	ndev->mtu = MCTP_DEV_TEST_MTU;
 	ndev->hard_header_len = 0;
-	ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+	ndev->tx_queue_len = 0;
 	ndev->flags = IFF_NOARP;
 	ndev->netdev_ops = &mctp_test_netdev_ops;
 	ndev->needs_free_netdev = true;
@@ -51,6 +54,7 @@ static struct mctp_test_dev *__mctp_test_create_dev(unsigned short lladdr_len,
 	dev->ndev = ndev;
 	ndev->addr_len = lladdr_len;
 	dev_addr_set(ndev, lladdr);
+	skb_queue_head_init(&dev->pkts);
 
 	rc = register_netdev(ndev);
 	if (rc) {
@@ -63,6 +67,11 @@ static struct mctp_test_dev *__mctp_test_create_dev(unsigned short lladdr_len,
 	dev->mdev->net = mctp_default_net(dev_net(ndev));
 	rcu_read_unlock();
 
+	/* bring the device up; we want to be able to TX immediately */
+	rtnl_lock();
+	dev_open(ndev, NULL);
+	rtnl_unlock();
+
 	return dev;
 }
 
@@ -79,26 +88,15 @@ struct mctp_test_dev *mctp_test_create_dev_lladdr(unsigned short lladdr_len,
 
 void mctp_test_destroy_dev(struct mctp_test_dev *dev)
 {
+	skb_queue_purge(&dev->pkts);
 	mctp_dev_put(dev->mdev);
 	unregister_netdev(dev->ndev);
 }
 
-static const unsigned int test_pktqueue_magic = 0x5f713aef;
-
-void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq)
-{
-	tpq->magic = test_pktqueue_magic;
-	skb_queue_head_init(&tpq->pkts);
-}
-
 static int mctp_test_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
 {
-	struct kunit *test = current->kunit_test;
-	struct mctp_test_pktqueue *tpq = test->priv;
-
-	KUNIT_ASSERT_EQ(test, tpq->magic, test_pktqueue_magic);
-
-	skb_queue_tail(&tpq->pkts, skb);
+	skb->dev = dst->dev->dev;
+	dev_queue_xmit(skb);
 
 	return 0;
 }
@@ -169,11 +167,9 @@ struct mctp_test_route *mctp_test_create_route_gw(struct net *net,
 	return rt;
 }
 
-/* Convenience function for our test dst; release with mctp_test_dst_release()
- */
+/* Convenience function for our test dst; release with mctp_dst_release() */
 void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst,
-			 struct mctp_test_dev *dev,
-			 struct mctp_test_pktqueue *tpq, unsigned int mtu)
+			 struct mctp_test_dev *dev, unsigned int mtu)
 {
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
 
@@ -183,15 +179,6 @@ void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst,
 	__mctp_dev_get(dst->dev->dev);
 	dst->mtu = mtu;
 	dst->output = mctp_test_dst_output;
-	mctp_test_pktqueue_init(tpq);
-	test->priv = tpq;
-}
-
-void mctp_test_dst_release(struct mctp_dst *dst,
-			   struct mctp_test_pktqueue *tpq)
-{
-	mctp_dst_release(dst);
-	skb_queue_purge(&tpq->pkts);
 }
 
 void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt)
diff --git a/net/mctp/test/utils.h b/net/mctp/test/utils.h
index 06bdb6cb5eff65..4cc90c9da4d1bf 100644
--- a/net/mctp/test/utils.h
+++ b/net/mctp/test/utils.h
@@ -18,6 +18,8 @@ struct mctp_test_dev {
 
 	unsigned short lladdr_len;
 	unsigned char lladdr[MAX_ADDR_LEN];
+
+	struct sk_buff_head pkts;
 };
 
 struct mctp_test_dev;
@@ -26,11 +28,6 @@ struct mctp_test_route {
 	struct mctp_route rt;
 };
 
-struct mctp_test_pktqueue {
-	unsigned int magic;
-	struct sk_buff_head pkts;
-};
-
 struct mctp_test_bind_setup {
 	mctp_eid_t bind_addr;
 	int bind_net;
@@ -59,11 +56,7 @@ struct mctp_test_route *mctp_test_create_route_gw(struct net *net,
 						  mctp_eid_t gw,
 						  unsigned int mtu);
 void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst,
-			 struct mctp_test_dev *dev,
-			 struct mctp_test_pktqueue *tpq, unsigned int mtu);
-void mctp_test_dst_release(struct mctp_dst *dst,
-			   struct mctp_test_pktqueue *tpq);
-void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq);
+			 struct mctp_test_dev *dev, unsigned int mtu);
 void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt);
 void mctp_test_skb_set_dev(struct sk_buff *skb, struct mctp_test_dev *dev);
 struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
diff --git a/tools/net/ynl/samples/tc-filter-add.c b/tools/net/ynl/samples/tc-filter-add.c
index 1f9cd3f62df630..97871e9e9edc6f 100644
--- a/tools/net/ynl/samples/tc-filter-add.c
+++ b/tools/net/ynl/samples/tc-filter-add.c
@@ -207,7 +207,7 @@ static int tc_filter_del(struct ynl_sock *ys, int ifi)
 
 	req = tc_deltfilter_req_alloc();
 	if (!req) {
-		fprintf(stderr, "tc_deltfilter_req_alloc failedq\n");
+		fprintf(stderr, "tc_deltfilter_req_alloc failed\n");
 		return -1;
 	}
 	memset(req, 0, sizeof(*req));
diff --git a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
index 87f89fd92f8c11..ae8abff4be409e 100644
--- a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
+++ b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
@@ -249,7 +249,7 @@ function listen_port_and_save_to() {
 
 	# Just wait for 2 seconds
 	timeout 2 ip netns exec "${NAMESPACE}" \
-		socat "${SOCAT_MODE}":"${PORT}",fork "${OUTPUT}"
+		socat "${SOCAT_MODE}":"${PORT}",fork "${OUTPUT}" 2> /dev/null
 }
 
 # Only validate that the message arrived properly
diff --git a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
index 92eb880c52f280..00758f00efbfad 100755
--- a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
+++ b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
@@ -75,7 +75,7 @@ setup_v4() {
 	ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1
 	if [ $? -ne 0 ]; then
 		cleanup_v4
-		echo "failed"
+		echo "failed; is the system using MACAddressPolicy=persistent ?"
 		exit 1
 	fi
 
diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py
index ebd82940ee5013..531e7fa1b3ea1e 100644
--- a/tools/testing/selftests/net/lib/py/ksft.py
+++ b/tools/testing/selftests/net/lib/py/ksft.py
@@ -163,7 +163,7 @@ def ksft_flush_defer():
         entry = global_defer_queue.pop()
         try:
             entry.exec_only()
-        except BaseException:
+        except Exception:
             ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!")
             tb = traceback.format_exc()
             for line in tb.strip().split('\n'):
@@ -333,7 +333,21 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
             KSFT_RESULT = False
             cnt_key = 'fail'
 
-        ksft_flush_defer()
+        try:
+            ksft_flush_defer()
+        except BaseException as e:
+            tb = traceback.format_exc()
+            for line in tb.strip().split('\n'):
+                ksft_pr("Exception|", line)
+            if isinstance(e, KeyboardInterrupt):
+                ksft_pr()
+                ksft_pr("WARN: defer() interrupted, cleanup may be incomplete.")
+                ksft_pr("      Attempting to finish cleanup before exiting.")
+                ksft_pr("      Interrupt again to exit immediately.")
+                ksft_pr()
+                stop = True
+            # Flush was interrupted, try to finish the job best we can
+            ksft_flush_defer()
 
         if not cnt_key:
             cnt_key = 'pass' if KSFT_RESULT else 'fail'
