+i40e-dkms (2.2.4-1~u14.04+mos1) mos; urgency=low
+
+ * Update to version 2.2.4
+
+ -- Ivan Suzdal <mos-linux@mirantis.com> Fri, 27 Oct 2017 16:34:44 +0300
+
i40e-dkms (2.1.26-1~u14.04+mos1) mos; urgency=low
* Update to version 2.1.26 (LP: 1712793)
+++ /dev/null
-23200 3 i40e-2.1.26/i40e.7
-41054 52 i40e-2.1.26/README
-42709 7 i40e-2.1.26/pci.updates
-23863 11 i40e-2.1.26/i40e.spec
-43521 19 i40e-2.1.26/COPYING
-11661 364 i40e-2.1.26/src/i40e_register.h
-36073 9 i40e-2.1.26/src/i40e_dcb_nl.c
-00545 82 i40e-2.1.26/src/i40e_adminq_cmd.h
-39134 46 i40e-2.1.26/src/i40e_nvm.c
-27049 47 i40e-2.1.26/src/i40e_type.h
-28147 22 i40e-2.1.26/src/i40e_prototype.h
-62648 57 i40e-2.1.26/src/kcompat.c
-58605 4 i40e-2.1.26/src/i40e_status.h
-38420 189 i40e-2.1.26/src/i40e_common.c
-25757 6 i40e-2.1.26/src/i40e_lan_hmc.h
-25014 2 i40e-2.1.26/src/i40e_diag.h
-25701 4 i40e-2.1.26/src/i40e_helper.h
-56270 8 i40e-2.1.26/src/i40e_hmc.h
-43670 23 i40e-2.1.26/src/i40e_client.c
-02605 177 i40e-2.1.26/src/i40e_ethtool.c
-35141 7 i40e-2.1.26/src/i40e_trace.h
-27367 30 i40e-2.1.26/src/i40e_adminq.c
-24070 2 i40e-2.1.26/src/i40e_devids.h
-13309 5 i40e-2.1.26/src/i40e_diag.c
-58300 36 i40e-2.1.26/src/i40e.h
-14892 19 i40e-2.1.26/src/i40e_txrx.h
-15513 23 i40e-2.1.26/src/virtchnl.h
-25383 27 i40e-2.1.26/src/i40e_ptp.c
-60199 35 i40e-2.1.26/src/i40e_lan_hmc.c
-22894 27 i40e-2.1.26/src/i40e_dcb.c
-64920 168 i40e-2.1.26/src/kcompat.h
-18552 366 i40e-2.1.26/src/i40e_main.c
-12661 7 i40e-2.1.26/src/i40e_client.h
-06581 6 i40e-2.1.26/src/Makefile
-38484 4 i40e-2.1.26/src/i40e_osdep.h
-03889 3 i40e-2.1.26/src/i40e_alloc.h
-27559 80 i40e-2.1.26/src/i40e_debugfs.c
-44588 1 i40e-2.1.26/src/Module.supported
-22387 6 i40e-2.1.26/src/i40e_virtchnl_pf.h
-58955 5 i40e-2.1.26/src/i40e_adminq.h
-28000 12 i40e-2.1.26/src/common.mk
-60958 93 i40e-2.1.26/src/i40e_virtchnl_pf.c
-32557 101 i40e-2.1.26/src/i40e_txrx.c
-12466 6 i40e-2.1.26/src/i40e_dcb.h
-03386 11 i40e-2.1.26/src/i40e_hmc.c
-33977 7 i40e-2.1.26/scripts/set_irq_affinity
-20875 2 i40e-2.1.26/scripts/dump_tables
-49876 5 i40e-2.1.26/scripts/virt_perf_default
--- /dev/null
+23200 3 i40e-2.2.4/i40e.7
+41054 52 i40e-2.2.4/README
+57015 7 i40e-2.2.4/pci.updates
+46157 11 i40e-2.2.4/i40e.spec
+43521 19 i40e-2.2.4/COPYING
+11661 364 i40e-2.2.4/src/i40e_register.h
+36073 9 i40e-2.2.4/src/i40e_dcb_nl.c
+00545 82 i40e-2.2.4/src/i40e_adminq_cmd.h
+39134 46 i40e-2.2.4/src/i40e_nvm.c
+56456 47 i40e-2.2.4/src/i40e_type.h
+28147 22 i40e-2.2.4/src/i40e_prototype.h
+62648 57 i40e-2.2.4/src/kcompat.c
+58605 4 i40e-2.2.4/src/i40e_status.h
+04760 190 i40e-2.2.4/src/i40e_common.c
+25757 6 i40e-2.2.4/src/i40e_lan_hmc.h
+25014 2 i40e-2.2.4/src/i40e_diag.h
+25701 4 i40e-2.2.4/src/i40e_helper.h
+56270 8 i40e-2.2.4/src/i40e_hmc.h
+43670 23 i40e-2.2.4/src/i40e_client.c
+61287 178 i40e-2.2.4/src/i40e_ethtool.c
+06523 7 i40e-2.2.4/src/i40e_trace.h
+27367 30 i40e-2.2.4/src/i40e_adminq.c
+24070 2 i40e-2.2.4/src/i40e_devids.h
+13309 5 i40e-2.2.4/src/i40e_diag.c
+38197 36 i40e-2.2.4/src/i40e.h
+45149 19 i40e-2.2.4/src/i40e_txrx.h
+15513 23 i40e-2.2.4/src/virtchnl.h
+25383 27 i40e-2.2.4/src/i40e_ptp.c
+60199 35 i40e-2.2.4/src/i40e_lan_hmc.c
+22894 27 i40e-2.2.4/src/i40e_dcb.c
+15728 169 i40e-2.2.4/src/kcompat.h
+40073 366 i40e-2.2.4/src/i40e_main.c
+12661 7 i40e-2.2.4/src/i40e_client.h
+06581 6 i40e-2.2.4/src/Makefile
+38484 4 i40e-2.2.4/src/i40e_osdep.h
+03889 3 i40e-2.2.4/src/i40e_alloc.h
+12976 80 i40e-2.2.4/src/i40e_debugfs.c
+44588 1 i40e-2.2.4/src/Module.supported
+22387 6 i40e-2.2.4/src/i40e_virtchnl_pf.h
+58955 5 i40e-2.2.4/src/i40e_adminq.h
+28000 12 i40e-2.2.4/src/common.mk
+48557 93 i40e-2.2.4/src/i40e_virtchnl_pf.c
+62481 100 i40e-2.2.4/src/i40e_txrx.c
+12466 6 i40e-2.2.4/src/i40e_dcb.h
+03386 11 i40e-2.2.4/src/i40e_hmc.c
+33977 7 i40e-2.2.4/scripts/set_irq_affinity
+20875 2 i40e-2.2.4/scripts/dump_tables
+49876 5 i40e-2.2.4/scripts/virt_perf_default
Name: i40e
Summary: Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
-Version: 2.1.26
+Version: 2.2.4
Release: 1
Source: %{name}-%{version}.tar.gz
Vendor: Intel Corporation
8086 0008 Ethernet Network Adapter OCP XXV710-1
8086 0009 Ethernet 25G 2P XXV710 Adapter
8086 4001 Ethernet Network Adapter XXV710-2
+ 37cc Ethernet Connection X722
37ce Ethernet Connection X722 for 10GbE backplane
1590 0215 Ethernet 10Gb 2-port 568i Adapter
17aa 4023 Ethernet Connection X722 for 10GbE backplane
#define I40E_CURRENT_NVM_VERSION_LO 0x40
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
struct i40e_hw *hw = &pf->hw;
u32 val;
- /* definitely clear the PBA here, as this function is meant to
- * clean out all previous interrupts AND enable the interrupt
- */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
}
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
{
struct i40e_aq_desc desc;
i40e_status status;
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status != I40E_SUCCESS)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ usleep_range(1000, 2000);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status != I40E_SUCCESS)
+ return status;
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
}
}
+/* Helper macros for printing upper half of the 32byte descriptor. */
+#ifdef I40E_32BYTE_RX
+#define RXD_RSVD1(_rxd) ((_rxd)->read.rsvd1)
+#define RXD_RSVD2(_rxd) ((_rxd)->read.rsvd2)
+#else
+#define RXD_RSVD1(_rxd) 0ULL
+#define RXD_RSVD2(_rxd) 0ULL
+#endif
+
/**
* i40e_dbg_dump_desc - handles dump desc write into command datum
* @cnt: number of arguments that the user supplied
" d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ RXD_RSVD1(rxd), RXD_RSVD2(rxd));
}
}
} else if (cnt == 3) {
"vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ RXD_RSVD1(rxd), RXD_RSVD2(rxd));
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
u8 autoneg;
struct ethtool_link_ksettings ksettings_real, *ksettings;
u32 advertise[ETHTOOL_LINK_MODE_MASK_U32];
+ u32 safe_supported[ETHTOOL_LINK_MODE_MASK_U32];
+ int i;
ksettings = &ksettings_real;
memcpy(ksettings,
return -EOPNOTSUPP;
}
+ /* save autoneg and speed out of ksettings */
+ autoneg = ksettings->base.autoneg;
+ memcpy(advertise, &ksettings->link_modes.advertising,
+ sizeof(advertise));
+
+ memset(&safe_ksettings, 0, sizeof(safe_ksettings));
+ /* Get link modes supported by hardware... */
+ i40e_get_link_settings_link_down(hw, &safe_ksettings, pf);
+ memcpy(safe_supported, &safe_ksettings.link_modes.supported,
+ sizeof(safe_supported));
+ /* ...and check against modes requested by user.
+ * Return an error if unsupported mode was set.
+ */
+ for (i = 0; i < ETHTOOL_LINK_MODE_MASK_U32; i++) {
+ if ((advertise[i] & safe_supported[i]) != advertise[i])
+ return -EINVAL;
+ }
+
/* get our own copy of the bits to check against */
memset(&safe_ksettings, 0, sizeof(struct ethtool_link_ksettings));
safe_ksettings.base.cmd = ksettings->base.cmd;
ksettings->base.link_mode_masks_nwords;
i40e_get_link_settings(netdev, &safe_ksettings);
- /* save autoneg and speed out of ksettings */
- autoneg = ksettings->base.autoneg;
- memcpy((void *)advertise,
- &ksettings->base.link_mode_masks[ETHTOOL_LINK_MODE_MASK_U32],
- sizeof(advertise));
-
/* set autoneg and speed back to what they currently are */
ksettings->base.autoneg = safe_ksettings.base.autoneg;
memcpy((void *)ksettings->link_modes.advertising,
safe_ksettings.link_modes.advertising,
sizeof(advertise));
- /* If ksettings and safe_ksettings are not the same now, then they are
- * trying to set something that we do not support
+ /* If ksettings.base and safe_ksettings.base are not the same now,
+ * then they are trying to set something that we do not support.
*/
- if (memcmp(ksettings, &safe_ksettings,
+ if (memcmp(&ksettings->base, &safe_ksettings.base,
sizeof(struct ethtool_link_settings)))
return -EOPNOTSUPP;
autoneg_changed = true;
}
}
+
if (advertise[0] & ADVERTISED_100baseT_Full)
config.link_speed |= I40E_LINK_SPEED_100MB;
if (advertise[0] & ADVERTISED_1000baseT_Full ||
if (!vf) {
if (ring >= vsi->num_queue_pairs)
return -EINVAL;
- dest_seid = vsi->id;
+ dest_seid = vsi->seid;
} else {
/* VFs are zero-indexed, so we subtract one here */
vf--;
#define DRV_VERSION_DESC ""
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 26
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
**/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
+#ifndef HAVE_XPS_QOS_SUPPORT
struct i40e_vsi *vsi = ring->vsi;
+#endif
int cpu;
if (!ring->q_vector || !ring->netdev)
return;
+#ifndef HAVE_XPS_QOS_SUPPORT
+ /* Some older kernels do not support XPS with QoS */
+ if (vsi->tc_config.numtc > 1) {
#ifndef HAVE_NETDEV_TC_RESETS_XPS
- /* Older kernels do not reset the XPS map when configuring traffic
- * classes. To allow selection based on TC we need to clear the
- * mapping here.
- */
- if ((vsi->tc_config.numtc > 1) &&
- test_and_clear_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
+ /* Additionally, some kernels do not properly clear the XPS
+ * mapping when the number of traffic classes is changed. In
+ * order to support these kernels we work around this by
+ * setting the XPS mapping to the empty cpu set.
+ */
cpumask_var_t mask;
+ /* Only clear the settings if we initialized XPS */
+ if (!test_and_clear_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+ return;
+
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return;
+
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
free_cpumask_var(mask);
+#endif /* !HAVE_NETDEV_TC_RESETS_XPS */
return;
}
-#endif
- if ((vsi->tc_config.numtc <= 1) &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
- cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
- netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
- ring->queue_index);
- }
+#endif /* !HAVE_XPS_QOS_SUPPORT */
+ /* We only initialize XPS once, so as not to overwrite user settings */
+ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+ return;
+
+ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+ ring->queue_index);
}
/**
rx_ctx.qlen = ring->count;
/* use 32 byte descriptors */
+#ifdef I40E_32BYTE_RX
rx_ctx.dsize = 1;
+#else
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
+#endif
/* descriptor type is always zero
* rx_ctx.dtype = 0;
rx_ctx.hsplit_0 = 0;
rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
- * @clearpba: true when all pending interrupt events should be cleared
**/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, val);
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
}
i40e_flush(&pf->hw);
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
}
return ret;
i40e_flush(hw);
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
return err;
}
I40E_FLAG_MSIX_ENABLED;
/* Set default ITR */
- pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
- pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+ pf->rx_itr_default = I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_TX_DEF;
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
#endif /* HAVE_NDO_GET_STATS64 */
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
+ /* Hardware only fetches new descriptors in cache lines of 8,
+ * essentially ignoring the lower 3 bits of the tail register. We want
+ * to ensure our tail writes are aligned to avoid unnecessary work. We
+ * can't simply round down the cleaned count, since we might fail to
+ * allocate some buffers. What we really want is to ensure that
+	 * next_to_use + cleaned_count produces an aligned value.
+ */
+ cleaned_count -= (ntu + cleaned_count) & 0x7;
+
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- /* Don't clear PBA because that can cause lost interrupts that
- * came in while we were cleaning/polling
- */
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
- i40e_irq_dynamic_enable_icr0(vsi->back, false);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
+ /* We OR these values together to check both against 4 (WB_STRIDE)
+ * below. This is safe since we don't re-use desc_count afterwards.
*/
desc_count |= ++tx_ring->packet_stride;
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
-#ifdef HAVE_SKB_XMIT_MORE
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
-#endif /* HAVE_SKB_XMIT_MORE */
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
+ if (desc_count >= WB_STRIDE) {
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
/* notify HW of packet */
#ifdef HAVE_SKB_XMIT_MORE
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_20K
-#define I40E_ITR_TX_DEF I40E_ITR_20K
+#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+#ifdef I40E_32BYTE_RX
#define i40e_rx_desc i40e_32byte_rx_desc
+#else
+#define i40e_rx_desc i40e_16byte_rx_desc
+#endif
#ifdef HAVE_STRUCT_DMA_ATTRS
#define I40E_RX_DMA_ATTR NULL
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
do { \
(i)++; \
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
struct i40e_hw *hw = &pf->hw;
u16 vsi_queue_id, pf_queue_id;
enum i40e_queue_type qtype;
- u16 next_q, vector_id;
+ u16 next_q, vector_id, size;
u32 reg, reg_idx;
u16 itr_idx = 0;
vsi_queue_id + 1));
}
- next_q = find_first_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ next_q = find_first_bit(&linklistmap, size);
+ if (unlikely(next_q == size))
+ goto irq_list_done;
+
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
wr32(hw, reg_idx, reg);
- while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ while (next_q < size) {
switch (qtype) {
case I40E_QUEUE_TYPE_RX:
reg_idx = I40E_QINT_RQCTL(pf_queue_id);
break;
}
- next_q = find_next_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES),
- next_q + 1);
- if (next_q <
- (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ next_q = find_next_bit(&linklistmap, size, next_q + 1);
+ if (next_q < size) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
rx_ctx.dsize = 1;
/* default values */
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.prefena = 1;
rx_ctx.l2tsel = 1;
i40e_free_vfs(pf);
err_iov:
/* Re-enable interrupt 0. */
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ struct hlist_node *h;
int bkt;
/* validate the request */
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
__i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
if (i40e_sync_vsi_filters(vsi)) {
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
goto error_param;
}
ether_addr_copy(vf->default_lan_addr.addr, mac);
- vf->pf_set_mac = true;
+
+ if (is_zero_ether_addr(mac)) {
+ vf->pf_set_mac = false;
+ dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
+ } else {
+ vf->pf_set_mac = true;
+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
+ mac, vf_id);
+ }
+
/* Force the VF driver stop so it has to reload with new MAC address */
i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
#endif
#endif /* < 4.8.0 */
#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
#endif /* >= 3.12.0 */
/*****************************************************************************/
#define HAVE_SKBUFF_CSUM_LEVEL
#endif /* >= RH 7.1 */
+/* RHEL 7.3 backported xmit_more */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_SKB_XMIT_MORE
+#endif /* >= RH 7.3 */
+
#undef GENMASK
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#endif /* !SLE_VERSION(12,3,0) */
#else
#define HAVE_UDP_ENC_RX_OFFLOAD
-#define HAVE_XPS_QOS_SUPPORT
#endif /* 4.8.0 */
/*****************************************************************************/
/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)))
+#define HAVE_DEV_WALK_API
+#endif
#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
#define HAVE_STRUCT_DMA_ATTRS
+#define HAVE_NETDEVICE_MIN_MAX_MTU
#endif
#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
#ifndef ETH_MIN_MTU
#define ETH_MIN_MTU 68
#endif /* ETH_MIN_MTU */
-#else
+#else /* >= 4.10 */
#define HAVE_NETDEVICE_MIN_MAX_MTU
#define HAVE_SWIOTLB_SKIP_CPU_SYNC
#define HAVE_NETDEV_TC_RESETS_XPS
+#define HAVE_XPS_QOS_SUPPORT
+#define HAVE_DEV_WALK_API
#endif /* 4.10.0 */
/*****************************************************************************/
#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
#endif /* 4.13.0 */
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0))
+#else /* > 4.14 */
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#endif /* 4.14.0 */
+
#endif /* _KCOMPAT_H_ */