+i40e-dkms (1.6.42-1~u14.04+mos1) mos; urgency=low
+
+ * Update to version 1.6.42
+
+ -- Ivan Suzdal <mos-linux@mirantis.com> Thu, 23 Mar 2017 18:27:01 +0300
+
i40e-dkms (1.5.19-1~u14.04+mos2) MOS; urgency=low
  * LP: #1619632 bug fix: i40e-dkms is not rebuilt after a kernel update
+PACKAGE_NAME="i40e"
+PACKAGE_VERSION="#MODULE_VERSION#"
MAKE[0]="'make' -C src/ KSRC=/lib/modules/${kernelver}/build"
CLEAN="make -C src/ clean"
-BUILT_MODULE_NAME[0]=i40e
-BUILT_MODULE_LOCATION[0]=src/
+BUILT_MODULE_NAME[0]="i40e"
+BUILT_MODULE_LOCATION[0]="src/"
DEST_MODULE_LOCATION[0]="/updates"
-PACKAGE_NAME=i40e-dkms
-PACKAGE_VERSION=1.5.19
REMAKE_INITRD=yes
AUTOINSTALL=yes
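For context, dh_dkms substitutes the real driver version for the #MODULE_VERSION# placeholder above at package build time, and DKMS then runs its usual cycle against this configuration. A minimal sketch of that cycle, with the version string taken from this upload:

    dkms add -m i40e -v 1.6.42
    dkms build -m i40e -v 1.6.42
    dkms install -m i40e -v 1.6.42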
+++ /dev/null
---- a/i40e-1.2.38/src/Makefile
-+++ b/i40e-1.2.38/src/Makefile
-@@ -103,6 +103,12 @@
- KVER_CODE := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VSP) 2> /dev/null |\
- grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
-
-+# Ubuntu kernel 3.13.0-30-generic does not have UTS_UBUNTU_RELEASE_ABI, but
-+# already includes pkt_hash_types. Pass the UTS_UBUNTU_RELEASE_ABI to the compiler.
-+ifeq ($(KVER),3.13.0-30-generic)
-+ CFLAGS_EXTRA += -DUTS_UBUNTU_RELEASE_ABI=30
-+endif
-+
- # set the install path before and after 3.2.0
- ifeq (1,$(shell [ $(KVER_CODE) -lt 197120 ] && echo 1 || echo 0))
- INSTDIR := /lib/modules/$(KVER)/kernel/drivers/net
+++ /dev/null
-0001-mirantis-fix-pass-UTS_UBUNTU_ABI-to-3.13.0-30-generic.patch
+++ /dev/null
-#!/bin/sh
-# Copyright (C) 2002-2005 Flavio Stanchina
-# Copyright (C) 2005-2006 Aric Cyr
-# Copyright (C) 2007 Mario Limonciello
-# Copyright (C) 2009 Alberto Milone
-
-# MOS packages has a suffix in its name
-# separated by the '~', which is actually
-# recognized as a part of the package version.
-# That's wrong and follows to DKMS fall during
-# the installation the package.
-# To fix it the CVERSION was extended with
-# the string "| cut -d\~ -f1" cutting the suffix
-
-set -e
-
-NAME=i40e
-PACKAGE_NAME=$NAME-dkms
-DEB_NAME=$(echo $PACKAGE_NAME | sed 's,_,-,')
-CVERSION=`dpkg-query -W -f='${Version}' $DEB_NAME | awk -F "-" '{print $1}' | cut -d\: -f2 | cut -d\~ -f1`
-ARCH=`dpkg --print-architecture`
-
-dkms_configure () {
- for POSTINST in /usr/lib/dkms/common.postinst "/usr/share/$PACKAGE_NAME/postinst"; do
- if [ -f "$POSTINST" ]; then
- "$POSTINST" "$NAME" "$CVERSION" "/usr/share/$PACKAGE_NAME" "$ARCH" "$2"
- return $?
- fi
- echo "WARNING: $POSTINST does not exist." >&2
- done
- echo "ERROR: DKMS version is too old and $PACKAGE_NAME was not" >&2
- echo "built with legacy DKMS support." >&2
- echo "You must either rebuild $PACKAGE_NAME with legacy postinst" >&2
- echo "support or upgrade DKMS to a more current version." >&2
- return 1
-}
-
-case "$1" in
- configure)
- dkms_configure
- ;;
-
- abort-upgrade|abort-remove|abort-deconfigure)
- ;;
-
- *)
- echo "postinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
+++ /dev/null
-#!/bin/sh
-
-NAME=i40e
-VERSION=1.5.19
-
-set -e
-
-case "$1" in
- remove|upgrade|deconfigure)
- if [ "`dkms status -m $NAME`" ]; then
- dkms remove -m $NAME -v $VERSION --all
- fi
- ;;
-
- failed-upgrade)
- ;;
-
- *)
- echo "prerm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-#DEBHELPER#
-
-exit 0
-
-
#!/usr/bin/make -f
-# -*- makefile -*-
-
-# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
+export VERSION := $(shell dpkg-parsechangelog | awk '/^Version/ {print $$2;}' | cut -d'-' -f1)
+export NAME := i40e
-DEB_NAME=i40e
-NAME=i40e
-VERSION=1.5.19
-
-configure: configure-stamp
-configure-stamp:
- dh_testdir
- touch configure-stamp
-
-
-build: build-stamp
-
-build-stamp: configure-stamp
- dh_testdir
- $(MAKE)
- touch $@
-
-clean:
- dh_testdir
- dh_testroot
- rm -f build-stamp configure-stamp
- -$(MAKE) clean
- dh_clean
-
-install: build
- dh_testdir
- dh_testroot
- dh_prep
- dh_installdirs
- $(MAKE) DESTDIR=$(CURDIR)/debian/$(DEB_NAME)-dkms NAME=$(NAME) VERSION=$(VERSION) install
-
-binary-arch: build install
-
-binary-indep: build install
- dh_testdir
- dh_testroot
- dh_link
- dh_strip
- dh_compress
- dh_fixperms
- dh_installdeb
- dh_shlibdeps
- dh_gencontrol
- dh_md5sums
- dh_builddeb
+%:
+ dh $@ --with dkms
-binary: binary-indep binary-arch
-.PHONY: build clean binary-indep binary-arch binary install configure
+override_dh_dkms:
+ dh_dkms -V $(VERSION)
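As a worked example of the VERSION extraction above, assuming the 1.6.42-1~u14.04+mos1 changelog entry from this upload (hypothetical shell session):

    $ dpkg-parsechangelog | awk '/^Version/ {print $2;}'
    1.6.42-1~u14.04+mos1
    $ echo 1.6.42-1~u14.04+mos1 | cut -d'-' -f1
    1.6.42

dh_dkms -V $(VERSION) then stamps this upstream version into the #MODULE_VERSION# placeholder in dkms.conf.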
--- /dev/null
+3.0 (quilt)
+++ /dev/null
-36453 5 i40e-1.5.19/pci.updates
-57941 4 i40e-1.5.19/src/i40e_helper.h
-44588 1 i40e-1.5.19/src/Module.supported
-29827 72 i40e-1.5.19/src/i40e_adminq_cmd.h
-47545 149 i40e-1.5.19/src/kcompat.h
-48944 84 i40e-1.5.19/src/i40e_debugfs.c
-61620 91 i40e-1.5.19/src/i40e_txrx.c
-22166 6 i40e-1.5.19/src/i40e_lan_hmc.h
-09578 46 i40e-1.5.19/src/i40e_nvm.c
-27994 7 i40e-1.5.19/src/Makefile
-27208 30 i40e-1.5.19/src/i40e_adminq.c
-07981 3 i40e-1.5.19/src/i40e_alloc.h
-54442 78 i40e-1.5.19/src/i40e_virtchnl_pf.c
-56064 7 i40e-1.5.19/src/i40e_dcb.h
-59117 2 i40e-1.5.19/src/i40e_diag.h
-52213 24 i40e-1.5.19/src/i40e_prototype.h
-51547 23 i40e-1.5.19/src/i40e_ptp.c
-17294 10 i40e-1.5.19/src/common.mk
-10518 37 i40e-1.5.19/src/i40e_dcb.c
-58990 5 i40e-1.5.19/src/i40e_osdep.h
-14633 9 i40e-1.5.19/src/i40e_dcb_nl.c
-25797 334 i40e-1.5.19/src/i40e_main.c
-06064 364 i40e-1.5.19/src/i40e_register.h
-36665 54 i40e-1.5.19/src/kcompat.c
-20253 5 i40e-1.5.19/src/i40e_adminq.h
-64861 6 i40e-1.5.19/src/i40e_virtchnl_pf.h
-17075 47 i40e-1.5.19/src/i40e_fcoe.c
-63972 8 i40e-1.5.19/src/i40e_hmc.h
-30612 55 i40e-1.5.19/src/i40e_type.h
-13567 135 i40e-1.5.19/src/i40e_ethtool.c
-60171 4 i40e-1.5.19/src/i40e_status.h
-23798 14 i40e-1.5.19/src/i40e_virtchnl.h
-24081 6 i40e-1.5.19/src/i40e_diag.c
-32236 2 i40e-1.5.19/src/i40e_devids.h
-63581 11 i40e-1.5.19/src/i40e_hmc.c
-20223 194 i40e-1.5.19/src/i40e_common.c
-63399 41 i40e-1.5.19/src/i40e_lan_hmc.c
-58183 4 i40e-1.5.19/src/i40e_fcoe.h
-60497 36 i40e-1.5.19/src/i40e.h
-10029 16 i40e-1.5.19/src/i40e_txrx.h
-33977 7 i40e-1.5.19/scripts/set_irq_affinity
-20875 2 i40e-1.5.19/scripts/dump_tables
-02733 18 i40e-1.5.19/COPYING
-52431 10 i40e-1.5.19/i40e.spec
-65334 46 i40e-1.5.19/README
-05363 3 i40e-1.5.19/i40e.7
===============================================================================
-April 6, 2016
+November 28, 2016
===============================================================================
The driver in this release is compatible with devices based on the following:
* Intel(R) Ethernet Controller X710
* Intel(R) Ethernet Controller XL710
- * Intel(R) Ethernet Controller X722
+ * Intel(R) Ethernet Controller XXV710
+
+
For information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
http://www.intel.com/support
-SFP+ Devices with Pluggable Optics
-----------------------------------
+SFP+ and QSFP+ Devices:
+NOTE:
+* Some Intel branded network adapters based on the X710/XL710 controller
+ may only support Intel branded modules for 40Gbps and 10Gbps. For other
+ connections based on the X710/XL710 controller, support is dependent on
+ your system board. Please see your vendor for details. Intel recommends
+ using Intel optics; other modules may function but are not validated by
+ Intel. Contact Intel for supported media types.
+
+Supplier Type Part Numbers
SR Modules
-----------
- Intel DUAL RATE 1G/10G SFP+ SR (bailed) E10GSFPSR
-
+ Intel DUAL RATE 1G/10G SFP+ SR (bailed) E10GSFPSR
LR Modules
-----------
- Intel DUAL RATE 1G/10G SFP+ LR (bailed E10GSFPLR
-
+ Intel DUAL RATE 1G/10G SFP+ LR (bailed) E10GSFPLR
1G SFP Modules
---------------
-The following is a list of 3rd party SFP modules that have received some
-testing. Not all modules are applicable to all devices.
-
-Supplier Type Part Numbers
-Finisar 1000BASE-T SFP FCLF-8251-3
-Kinnex A 1000BASE-T SFP XSFP-T-RJ12-0101-DLL
-Avago 1000BASE-T SFP ABCU-5710RZ
-
+ The following is a list of 3rd party SFP modules that have received some
+ testing. Not all modules are applicable to all devices.
+ Finisar 1000BASE-T SFP FCLF-8251-3
+ Kinnex A 1000BASE-T SFP XSFP-T-RJ12-0101-DLL
+ Avago 1000BASE-T SFP ABCU-5710RZ
QSFP+ Modules
--------------
-NOTE: Intel branded network adapters based on the X710/XL710 controller
- (for example, Intel(R) Ethernet Converged Network Adapter XL710-Q1) support
- the E40GQSFPLR module. For other connections based on the X710/XL710
- controller, support is dependent on your system board. Please see your vendor
- for details.
-
- Intel TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed) E40GQSFPSR
- Intel TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed) E40GQSFPLR
- QSFP+ 1G speed is not supported on XL710 based devices.
-
-X710/XL710 Based SFP+ adapters support passive QSFP+ Direct Attach cables.
-Intel recommends using Intel optics and cables. Other modules may function
-but are not validated by Intel. Contact Intel for supported media types.
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed) E40GQSFPSR
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed) E40GQSFPLR
+ QSFP+ 1G speed is not supported on XL710 based devices.
+SFP+ and QSFP+ Direct Attach Cables
+ X710/XL710 based SFP+/QSFP+ adapters support passive SFP+/QSFP+ Direct
+ Attach cables. Intel recommends using Intel(R) Ethernet SFP+/QSFP+ Twinaxial
+ Cables. Other cables may function but are not validated by Intel. Contact
+ Intel for supported media types.
+ X710 based SFP+ adapters support all passive and active limiting direct
+ attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4
+ specifications.
+ Intel(R) Ethernet SFP+ Twinaxial Cable XDACBL1M, XDACBL3M, XDACBL5M
+ Intel(R) Ethernet QSFP+ Breakout Cable X4DACBL1, X4DACBL3, X4DACBL5
+ Intel(R) Ethernet QSFP+ Twinaxial Cable XLDACBL1, XLDACBL3, XLDACBL5
+
+SFP+ Devices for XXV710-Based Adapters
+--------------------------------------
+ Intel(R) Ethernet SFP28 SR Optic E25GSFP28SR
+ Intel(R) Ethernet SFP28 Twinaxial Cable XXVDACBL1M, XXVDACBL2M, XXVDACBL3M
+ Intel(R) Ethernet QSFP28 to SFP28 Twinaxial XXV4DACBL1M, XXV4DACBL2M, XXV4DACBL3M
+ Breakout Cable
================================================================================
max_vfs
-------
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions.
Valid Range:
1-32 (X710 based devices)
1-64 (XL710 based devices)
+
NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
and above, use sysfs to enable VFs. For example:
#echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs //enable VFs
#echo 0 > /sys/class/net/$dev/device/sriov_numvfs //disable VFs
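For example, to enable four VFs on a hypothetical interface eth0 and then
disable them again:
 #echo 4 > /sys/class/net/eth0/device/sriov_numvfs
 #echo 0 > /sys/class/net/eth0/device/sriov_numvfs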
+
The parameters for the driver are referenced by position. Thus, if you have a
dual port adapter, or more than one adapter in your system, and want N virtual
functions per port, you must specify a number for each port with each parameter
separated by a comma. For example:
- modprobe i40e max_vfs=4,1
+
+ modprobe i40e max_vfs=4
+
+This will spawn 4 VFs on the first port.
+
+ modprobe i40e max_vfs=2,4
+
+This will spawn 2 VFs on the first port and 4 VFs on the second port.
+
NOTE: Caution must be used in loading the driver with these parameters.
Depending on your system configuration, number of slots, etc., it is impossible
to predict in all cases where the positions would be on the command line.
-This parameter adds support for SR-IOV. It causes the driver to spawn up to
-max_vfs worth of virtual functions.
+
+NOTE: Neither the device nor the driver control how VFs are mapped into config
+space. Bus layout will vary by operating system. On operating systems that
+support it, you can check sysfs to find the mapping.
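For example, on kernels with standard SR-IOV sysfs support, the PCI addresses
of the VFs behind a given PF can be listed as follows (interface name
illustrative):
 #ls -l /sys/class/net/eth2/device/virtfn*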
+
+
Some hardware configurations support fewer SR-IOV instances, as the whole
XL710 controller (all functions) is limited to 128 SR-IOV interfaces in total.
+
NOTE: When SR-IOV mode is enabled, hardware VLAN
filtering and VLAN tag stripping/insertion will remain enabled. Please remove
the old VLAN filter before the new VLAN filter is added. For example,
Where eth2 is a VF interface in the VM
NOTE: By default, the ethtool priv-flag vf-true-promisc-support is set to
- “off”,meaning that promiscuous mode for the VF will be limited. To set the
+ "off",meaning that promiscuous mode for the VF will be limited. To set the
promiscuous mode for the VF to true promiscuous and allow the VF to see
all ingress traffic, use the following command.
- #ethtool –set-priv-flags p261p1 vf-true-promisc-support on
+ #ethtool --set-priv-flags p261p1 vf-true-promisc-support on
The vf-true-promisc-support priv-flag does not enable promiscuous mode;
rather, it designates which type of promiscuous mode (limited or true)
you will get when you enable promiscuous mode using the ip link commands
Now add a VLAN interface on the VF interface.
#ip link add link eth2 name eth2.100 type vlan id 100
-Note that the order in which you set the VF to promiscuous mode and add
+Note that the order in which you set the VF to promiscuous mode and add
the VLAN interface does not matter (you can do either first). The end result
in this example is that the VF will get all traffic that is tagged with
VLAN 100.
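For instance, promiscuous mode itself is enabled with the usual ip link
command (eth2 as in the example above):
 #ip link set eth2 promisc on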
- Enables tight control on routing a flow in the platform.
- Matches flows and CPU cores for flow affinity.
- Supports multiple parameters for flexible flow classification and load
- balancing (in SFP mode only).
+ balancing (in SFP mode only).
NOTES:
- An included script (set_irq_affinity) automates setting the IRQ to
CPU affinity.
- - The Linux i40e driver supports the following flow types: IPv4, TCPv4, and
+ - The Linux i40e driver supports the following flow types: IPv4, TCPv4, and
UDPv4. For a given flow type, it supports valid combinations of
- IP addresses (source or destination) and UDP/TCP ports (source and
+ IP addresses (source or destination) and UDP/TCP ports (source and
destination). For example, you can supply only a source IP address,
a source IP address and a destination port, or any combination of one or
more of these four parameters.
# ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
- To set a filter using only the source and destination IP address:
+ To set a filter using only the source and destination IP address:
# ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
192.168.10.2 action 2 [loc 1]
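 A filter using only a source IP address follows the same pattern (address
 illustrative):
 # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 action 2 [loc 1]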
--------------------
On a complex network that supports multiple types of traffic (such as for
storage as well as cloud), cloud filter support allows you to send one type of
- traffic (for example, the storage traffic) to the Physical Function (PF) and
+ traffic (for example, the storage traffic) to the Physical Function (PF) and
another type (say, the cloud traffic) to a Virtual Function (VF). Because cloud
networks are typically VXLAN/Geneve-based, you can define a cloud filter to
identify VXLAN/Geneve packets and send them to a queue in the VF to be
- The "action -1" option, which drops matching packets in regular Flow
Director filters, is not available to drop packets when used with
cloud filters.
- - For IPv4 and ether flow-types, cloud filters cannot be used for TCP or
+ - For IPv4 and ether flow-types, cloud filters cannot be used for TCP or
UDP filters.
- Cloud filters can be used as a method for implementing queue splitting in
the PF.
L3 filters
Application Destination IP
-Use ethtool’s flow director and user defined (user-def) options to define
+Use ethtool's flow director and user defined (user-def) options to define
cloud filters for tunneled packets (VF) and L3 filters for non-tunneled
packets (PF or VF). In this case, the user-def field specifies that a cloud
filter is being programmed instead of a Flow Director filter. Note that this
For L3 filters (non-tunneled packets):
- - “user-def 0xffffffff00000002” (no Tenant ID/VNI specified in the upper
+ - "user-def 0xffffffff00000002" (no Tenant ID/VNI specified in the upper
32 bits of the user-def field and send to VF id 2)
- Only L3 parameters (src-IP, dst-IP) are considered
ethtool -N p4p2 flow-type ip4 src-ip 192.168.42.13 dst-ip 192.168.42.33 \
src-port 12344 dst-port 12344 user-def 0xffffffff00000001 loc 3
Redirect traffic coming from 192.168.42.13 port 12344 with destination
- 192.168.42.33 port 12344 into VF id 1, and call this “rule 3”
+ 192.168.42.33 port 12344 into VF id 1, and call this "rule 3"
For cloud filters (tunneled packets):
if specified or required.
- The lower 32 bits of the 'user-def' field can be used to specify the
VF ID. If the ID is greater than the maximum number of VFs currently
- enabled then the ID will default back to the main VSI.
- - Cloud filters can be defined with inner MAC, outer MAC, inner IP address,
+ enabled then the ID will default back to the main VSI.
+ - Cloud filters can be defined with inner MAC, outer MAC, inner IP address,
inner VLAN, and VNI as part of the cloud tuple. Cloud filters filter on
- destination (not source) MAC and IP. The destination and source MAC
+ destination (not source) MAC and IP. The destination and source MAC
address fields in the ethtool command are overloaded as dst = outer,
- src = inner MAC address to facilitate tuple definition for a cloud filter.
+ src = inner MAC address to facilitate tuple definition for a cloud filter.
- The 'loc' parameter specifies the rule number of the filter as being
stored in the base driver
Example:
ethtool -N p4p2 flow-type ip4 src-ip 192.168.42.13 dst-ip 192.168.42.33 \
- src-port 12344 dst-port 12344 user-def 0x2200000001 loc 38
+ src-port 12344 dst-port 12344 user-def 0x0000000001 loc 38
Redirect traffic on VXLAN using tunnel id 34 (hex 0x22) coming from
192.168.42.13 port 12344 with destination 192.168.42.33 port 12344 into
- VF id 1, and call this “rule 38”
+ VF id 1, and call this "rule 38"
NOTE: If the VF id given is larger than the number of active VFs (e.g.
if you set num_vfs to 8 and use VF id 12 in the ethtool command) the
traffic will be redirected to the PF rather than to the VF.
To see the list of filters currently present:
ethtool <-u|-n> ethX
- NOTE: For cloud filters in which the specified VF is greater than
+ NOTE: For cloud filters in which the specified VF is greater than
the number of VFs supported, the cloud filter will send traffic
to the PF. However, the driver does not store the specified VF
number, so in this case the ethtool -n command will display
/etc/sysconfig/network-scripts/ifcfg-eth<x> for RHEL or to the file
/etc/sysconfig/network/<config_file> for SLES.
+
+
NOTES:
- The maximum MTU setting for Jumbo Frames is 9706. This value coincides
with the maximum Jumbo Frames size of 9728 bytes.
when allocating receive packets.
+
ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
n Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
+Speed and Duplex Configuration
+------------------------------
+
+In addressing speed and duplex configuration issues, you need to
+distinguish between copper-based adapters and fiber-based adapters.
+
+In the default mode, an Intel(R) Network Adapter using copper connections
+will attempt to auto-negotiate with its link partner to determine the best
+setting. If the adapter cannot establish link with the link partner using
+auto-negotiation, you may need to manually configure the adapter and link
+partner to identical settings to establish link and pass packets. This
+should only be needed when attempting to link with an older switch that
+does not support auto-negotiation or one that has been forced to a specific
+speed or duplex mode. Your link partner must match the setting you choose.
+1 Gbps speeds and higher cannot be forced. Use the autonegotiation
+advertising setting to manually set devices for 1 Gbps and higher.
+
+NOTE: You cannot set the speed for Intel(R) Ethernet Network Adapter
+XXV710 based devices.
+
+Speed, duplex, and autonegotiation advertising are configured through
+the ethtool* utility. ethtool is included with all versions of Red Hat
+after Red Hat 7.2. For the latest version, download and install
+ethtool from the following website:
+
+ http://ftp.kernel.org/pub/software/network/ethtool/
+
+
+Caution: Only experienced network administrators should force speed and
+duplex or change autonegotiation advertising manually. The settings at
+the switch must always match the adapter settings. Adapter performance
+may suffer or your adapter may not operate if you configure the adapter
+differently from your switch.
+
+An Intel(R) Network Adapter using fiber-based connections, however, will not
+attempt to auto-negotiate with its link partner since those adapters operate
+only in full duplex and only at their native speed.
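For example, to advertise only 1000baseT/Full on a copper port, using the
advertise bitmask documented in the ethtool man page (ethX illustrative):
 # ethtool -s ethX autoneg on advertise 0x020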
+
+
NAPI
----
NAPI (Rx polling mode) is supported in the i40e driver.
ethtool:
ethtool -A eth? autoneg off rx on tx on
+
NOTE: You must have a flow control capable link partner.
+
+
RSS Hash Flow
-------------
Allows you to set the hash bytes per flow type and any combination of one or
more options for Receive Side Scaling (RSS) hash byte configuration.
-#ethtool –N <dev> rx-flow-hash <type> <option>
+#ethtool -N <dev> rx-flow-hash <type> <option>
Where <type> is:
tcp4 signifying TCP over IPv4
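A minimal concrete example: to hash tcp4 flows on source/destination IP
addresses and ports (the sdfn option combination) for a hypothetical eth0:
 #ethtool -N eth0 rx-flow-hash tcp4 sdfn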
------------------------------------------------------------
Precision Time Protocol (PTP) is used to synchronize clocks in a computer
-network and is supported in the i40e driver.
+network. PTP support varies among Intel devices that support this driver.
+Use "ethtool -T <netdev name>" to get a definitive list of PTP capabilities
+supported by the device.
operating system. This reduces CPU utilization.
VXLAN offloading is controlled by the tx and rx checksum offload options
-provided by ethtool. That is, if tx checksum offload is enabled, and the adapter
-has the capability, VXLAN offloading is also enabled. If rx checksum offload is
-enabled, then the VXLAN packets rx checksum will be offloaded, unless the module
-parameter vxlan_rx=0,0 was used to specifically disable the VXLAN rx offload.
-
-VXLAN Overlay HW Offloading is enabled by default. To view and configure VXLAN
-on a VXLAN-overlay offload enabled device, use the following
-command:
-
- # ethtool -k ethX
- (This command displays the offloads and their current state.)
+provided by ethtool. That is, if tx checksum offload is enabled, and the
+adapter has the capability, VXLAN offloading is also enabled.
-i40e support for VXLAN HW offloading is dependent on
+Support for VXLAN HW offloading is dependent on
kernel support of the HW offloading features.
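The offload state can be inspected and toggled through the generic ethtool
offload interface (ethX illustrative):
 # ethtool -k ethX
 (This command displays the offloads and their current state.)
 # ethtool -K ethX tx on rx on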
-For more information on configuring your network for overlay HW offloading
-support, refer to the Intel Technical Brief, "Creating Overlay Networks
-Using Intel Ethernet Converged Network Adapters" (Intel Networking Division,
-August 2013):
-
-http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
-overlay-networks-using-converged-network-adapters-brief.pdf
-
Multiple Functions per Port
---------------------------
optimization is required we recommend experimenting with the following
settings.
+Virtualized Environments:
+
+1. Disable XPS on both ends by using the included virt_perf_default script
+ or by running the following command as root:
+ for file in `ls /sys/class/net/<ethX>/queues/tx-*/xps_cpus`;
+ do echo 0 > $file; done
+
+2. Using the appropriate mechanism (vcpupin) in the VM, pin the CPUs to
+ individual LCPUs, making sure to use a set of CPUs included in the
+ device's local_cpulist: /sys/class/net/<ethX>/device/local_cpulist.
+
+3. Configure as many rx/tx queues in the VM as available. Do not rely on
+ the default setting of 1.
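For step 3, where the driver in the VM supports it, the queue count can be
checked and raised with ethtool (ethX illustrative; a sketch, not validated
on every VF driver):
 # ethtool -l ethX
 # ethtool -L ethX combined 4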
+
+
+Non-virtualized Environments:
+
Pin the adapter's IRQs to specific cores by disabling the irqbalance service
and using the included set_irq_affinity script. Please see the script's help
text for further options.
- Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
interrupts per second per queue.
- # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 125
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 125
tx-usecs 125
For lower CPU utilization: Disable Adaptive ITR and lower rx and tx interrupts
- Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
interrupts per second per queue.
- # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 250
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 250
tx-usecs 250
For lower latency: Disable Adaptive ITR and ITR by setting rx and tx to 0
using ethtool.
- # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 0
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 0
tx-usecs 0
----------------------------
+depmod warning messages about unknown symbol during installation
+----------------------------------------------------------------
+
+During driver installation, you may see depmod warning messages referring
+to unknown symbols i40e_register_client and i40e_unregister_client. These
+messages are informational only; no user action is required. The installation
+should complete successfully.
+
+
+Error: <ifname> selects TX queue XX but real number of TX queues is YY
+----------------------------------------------------------------------
+
+When configuring the number of queues under heavy traffic load, you may
+see an error message stating "<ifname> selects TX queue XX, but real number
+of TX queues is YY". This message is informational only and does not affect
+functionality.
+
+
+Windows Server 2016 Does Not Work as a Guest OS on Older RHEL and SLES KVMs
+---------------------------------------------------------------------------
+
+Microsoft* Windows Server* 2016 does not work as a guest operating system
+on the KVM hypervisor version included with Red Hat* Enterprise Linux* (RHEL)
+version 6.8 and Suse* Linux Enterprise Server (SLES) version 11.4. Windows
+Server 2016 does work as a guest OS on RHEL 7.2 and SLES 12.1.
+
+
Fixing Performance Issues When Using IOMMU in Virtualized Environments
----------------------------------------------------------------------
The IOMMU feature of the processor prevents I/O devices from accessing memory
in the IOMMU).
If you experience significant performance issues with IOMMU, try using it in
-“passthrough” mode by adding the following to the kernel boot command line:
+"passthrough" mode by adding the following to the kernel boot command line:
intel_iommu=on iommu=pt
NOTE: This mode enables remapping for assigning devices to VMs, providing
UDP Stress Test Dropped Packet Issue
------------------------------------
-Under small packet UDP stress with the i40edriver, the system may
+Under small packet UDP stress with the i40e driver, the system may
drop UDP packets due to socket buffers being full. Setting the driver Flow
Control variables to the minimum may resolve the issue. You may also try
increasing the kernel's default buffer sizes by changing the values in
or the Intel Wired Networking project hosted by Sourceforge at:
http://sourceforge.net/projects/e1000
+
If an issue is identified with the released source code on a supported
kernel with a supported adapter, email the specific information related to the
issue to e1000-devel@lists.sf.net.
+
================================================================================
--- /dev/null
+32879 3 i40e-1.6.42/i40e.7
+55011 50 i40e-1.6.42/README
+45713 7 i40e-1.6.42/pci.updates
+52651 11 i40e-1.6.42/i40e.spec
+02733 18 i40e-1.6.42/COPYING
+47165 364 i40e-1.6.42/src/i40e_register.h
+56893 14 i40e-1.6.42/src/i40e_virtchnl.h
+36073 9 i40e-1.6.42/src/i40e_dcb_nl.c
+28578 76 i40e-1.6.42/src/i40e_adminq_cmd.h
+33761 45 i40e-1.6.42/src/i40e_nvm.c
+52885 51 i40e-1.6.42/src/i40e_type.h
+56103 20 i40e-1.6.42/src/i40e_prototype.h
+25766 55 i40e-1.6.42/src/kcompat.c
+58605 4 i40e-1.6.42/src/i40e_status.h
+07031 168 i40e-1.6.42/src/i40e_common.c
+25757 6 i40e-1.6.42/src/i40e_lan_hmc.h
+25014 2 i40e-1.6.42/src/i40e_diag.h
+27793 4 i40e-1.6.42/src/i40e_fcoe.h
+25701 4 i40e-1.6.42/src/i40e_helper.h
+56270 8 i40e-1.6.42/src/i40e_hmc.h
+65130 139 i40e-1.6.42/src/i40e_ethtool.c
+05858 30 i40e-1.6.42/src/i40e_adminq.c
+24070 2 i40e-1.6.42/src/i40e_devids.h
+13309 5 i40e-1.6.42/src/i40e_diag.c
+32391 36 i40e-1.6.42/src/i40e.h
+16694 17 i40e-1.6.42/src/i40e_txrx.h
+23499 25 i40e-1.6.42/src/i40e_ptp.c
+60199 35 i40e-1.6.42/src/i40e_lan_hmc.c
+15948 27 i40e-1.6.42/src/i40e_dcb.c
+29059 161 i40e-1.6.42/src/kcompat.h
+42183 346 i40e-1.6.42/src/i40e_main.c
+51264 6 i40e-1.6.42/src/Makefile
+41749 48 i40e-1.6.42/src/i40e_fcoe.c
+51731 5 i40e-1.6.42/src/i40e_osdep.h
+03889 3 i40e-1.6.42/src/i40e_alloc.h
+46374 81 i40e-1.6.42/src/i40e_debugfs.c
+44588 1 i40e-1.6.42/src/Module.supported
+39178 6 i40e-1.6.42/src/i40e_virtchnl_pf.h
+19614 5 i40e-1.6.42/src/i40e_adminq.h
+10789 12 i40e-1.6.42/src/common.mk
+06090 80 i40e-1.6.42/src/i40e_virtchnl_pf.c
+49924 92 i40e-1.6.42/src/i40e_txrx.c
+12466 6 i40e-1.6.42/src/i40e_dcb.h
+03386 11 i40e-1.6.42/src/i40e_hmc.c
+33977 7 i40e-1.6.42/scripts/set_irq_affinity
+20875 2 i40e-1.6.42/scripts/dump_tables
+49876 5 i40e-1.6.42/scripts/virt_perf_default
This driver is intended for \fB2.6.32\fR and newer kernels.
This driver includes support for any 64 bit Linux supported system,
including Itanium(R)2, x86_64, PPC64, ARM, etc.
+
.LP
This driver is only supported as a loadable module at this time. Intel is
not supplying patches against the kernel source to allow for static linking of
the drivers.
+
For questions related to hardware requirements, refer to the documentation
supplied with your Intel adapter. All hardware requirements listed apply to
use with Linux.
- This driver will attempt to use multiple page sized buffers to receive
each jumbo packet. This should help to avoid buffer starvation issues
when allocating receive packets.
+
See the section "Jumbo Frames" in the Readme.
.SH SUPPORT
.LP
If an issue is identified with the released source code on a supported
kernel with a supported adapter, email the specific information related to the
issue to e1000-devel@lists.sf.net.
+
.LP
Name: i40e
-Summary: Intel(R) Ethernet Connection XL710 Linux Driver
-Version: 1.5.19
+Summary: Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+Version: 1.6.42
Release: 1
Source: %{name}-%{version}.tar.gz
Vendor: Intel Corporation
Requires: kernel, fileutils, findutils, gawk, bash
%description
-This package contains the Linux driver for the Intel(R) Ethernet Connection XL710 Family of devices.
+This package contains the Intel(R) 40-10 Gigabit Ethernet Connection Network Driver.
%prep
%setup
make -C src
%install
-make -C src INSTALL_MOD_PATH=%{buildroot} MANDIR=%{_mandir} install
+make -C src INSTALL_MOD_PATH=%{buildroot} MANDIR=%{_mandir} rpm
# Append .new to driver name to avoid conflict with kernel RPM
cd %{buildroot}
find lib -name "i40e.*o" -exec mv {} {}.new \; \
if [ "%{pcitable}" != "/dev/null" ]; then
echo "original pcitable saved in /usr/local/share/%{name}";
fi
-for k in $(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-\+]*\).*/\1/' $FL) ;
+for k in $(sed 's#/lib/modules/\([0-9a-zA-Z.+_-]*\).*$#\1#' $FL) ;
do
d_drivers=/lib/modules/$k
d_usr=/usr/local/share/%{name}/$k
done
# Check if kernel version rpm was built on IS the same as running kernel
-BK_LIST=$(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-\+]*\).*/\1/' $FL)
+BK_LIST=$(sed 's#/lib/modules/\([0-9a-zA-Z.+_-]*\).*$#\1#' $FL)
MATCH=no
for i in $BK_LIST
do
uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true
echo "Updating initrd..."
-dracut --force
+# Decide which initrd update utility to use.
+# Default is dracut but we'll try mkinitrd if that's not found.
+which dracut >/dev/null 2>&1
+if [ $? -eq 0 ]; then
+ echo "Using dracut to update initrd..."
+ initrd_cmd="dracut --force"
+else
+ which mkinitrd >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Using mkinitrd to update initrd..."
+ initrd_cmd="mkinitrd"
+ else
+ echo "Unable to find initrd update utility."
+ echo "You must update your initrd for changes to take place."
+ exit -1
+ fi
+fi
+
+# Do the initrd update and report success or failure.
+if [ "$initrd_cmd" != "" ]; then
+ eval "$initrd_cmd"
+ if [ $? -ne 0 ]; then
+ echo "Failed to update initrd."
+ echo "You must update your initrd for changes to take place."
+ exit -1
+ else
+ echo "Successfully updated initrd."
+ fi
+fi
%preun
# If doing RPM un-install
################################################################################
#
# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
-# Copyright(c) 2013 - 2016 Intel Corporation.
+# Copyright(c) 2013 - 2017 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
8086 0008 Ethernet Converged Network Adapter X710-2
8086 0009 Ethernet Controller X710 for 10GbE SFP+
8086 000a Ethernet Controller X710 for 10GbE SFP+
+ 8086 000b Ethernet Server Adapter X710-DA2 for OCP
8086 000d Ethernet Controller X710 for 10GbE SFP+
8086 4005 Ethernet Controller X710 for 10GbE SFP+
8086 4006 Ethernet Controller X710 for 10GbE SFP+
1028 0000 Ethernet 10G X710-k bNDC
1028 1f98 Ethernet 10G 4P X710-k bNDC
1028 1f9e Ethernet 10G 2P X710-k bNDC
+ 1590 0000 Ethernet 10Gb 2-port 563i Adapter
+ 1590 00f8 Ethernet 10Gb 2-port 563i Adapter
8086 0000 Ethernet Converged Network Adapter XL710-Q2
1583 Ethernet Controller XL710 for 40GbE QSFP+
1028 0000 Ethernet 40G 2P XL710 QSFP+ rNDC
108e 0000 Ethernet Controller X710 for 10GBASE-T
108e 4857 Ethernet Controller X710 for 10GBASE-T
1587 Ethernet Controller XL710 for 20GbE backplane
- 103c 0000 Flex-20 20Gb 2-port 660FLB Adapter
- 103c 22fe Flex-20 20Gb 2-port 660FLB Adapter
+ 103c 0000 Eth 10/20Gb 2p 660FLB Adptr
+ 103c 22fe Eth 10/20Gb 2p 660FLB Adptr
1588 Ethernet Controller XL710 for 20GbE backplane
- 103c 0000 Flex-20 20Gb 2-port 660M Adapter
- 103c 22ff Flex-20 20Gb 2-port 660M Adapter
+ 103c 0000 Eth 10/20Gb 2p 660M Adptr
+ 103c 22ff Eth 10/20Gb 2p 660M Adptr
1589 Ethernet Controller X710/X557-AT 10GBASE-T
108e 0000 Quad Port 10GBase-T Adapter
108e 7b1c Quad Port 10GBase-T Adapter
8086 0000 Ethernet Converged Network Adapter X710-T
8086 0001 Ethernet Converged Network Adapter X710-T4
8086 0002 Ethernet Converged Network Adapter X710-T4
+ 8086 0003 Ethernet Converged Network Adapter X710-T
+ 8086 00A0 Ethernet Converged Network Adapter X710-T4
+ 158a Ethernet Controller XXV710 for 25GbE backplane
+ 158b Ethernet Controller XXV710 for 25GbE SFP28
+ 8086 0000 Ethernet Network Adapter XXV710
+ 8086 0001 Ethernet Network Adapter XXV710-2
+ 8086 0002 Ethernet Network Adapter XXV710-2
+ 8086 0003 Ethernet Network Adapter XXV710-1
+ 8086 0004 Ethernet Network Adapter XXV710-1
+ 8086 0007 Ethernet Network Adapter OCP XXV710-1
+ 8086 0008 Ethernet Network Adapter OCP XXV710-1
37ce Ethernet Connection X722 for 10GbE backplane
+ 1590 0215 Ethernet 10Gb 2-port 568i Adapter
+ 17aa 4023 Ethernet Connection X722 for 10GbE backplane
37cf Ethernet Connection X722 for 10GbE QSFP+
37d0 Ethernet Connection X722 for 10GbE SFP+
+ 17aa 4020 Ethernet Connection X722 for 10GbE SFP+
+ 17aa 4021 Ethernet Connection X722 for 10GbE SFP+
+ 17aa 4022 Ethernet Connection X722 for 10GbE SFP+
37d1 Ethernet Connection X722 for 1GbE
+ 1590 0216 Ethernet 1Gb 2-port 368i Adapter
+ 1590 0217 Ethernet 1Gb 2-port 368FLR-MMT Adapter
+ 17aa 4020 Ethernet Connection X722 for 1GbE
+ 17aa 4021 Ethernet Connection X722 for 1GbE
37d2 Ethernet Connection X722 for 10GBASE-T
+ 1590 0218 Ethernet 10Gb 2-port 568FLR-MMT Adapter
+ 1590 0219 Ethernet 10Gb 2-port 568FLR-MMSFP+ Adapter
+ 17aa 4020 Ethernet Connection X722 for 10GBASE-T
+ 17aa 4021 Ethernet Connection X722 for 10GBASE-T
+ 17aa 4022 Ethernet Connection X722 for 10GBASE-T
37d3 Ethernet Connection X722 for 10GbE SFP+
- 37d4 Ethernet Connection X722 for 10GbE QSFP+
--- /dev/null
+#!/bin/bash
+#
+# Copyright (c) 2016, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# to get help:
+# virt-perf-default
+
+usage()
+{
+ echo
+ echo "Usage: $0 [ethX] <[ethY]>"
+ echo " Ex: $0 eth0"
+ echo
+ exit 1
+}
+
+num='^[0-9]+$'
+# Vars
+AFF=$1
+shift
+
+case "$AFF" in
+ [0-9]*) ;;
+ -h|--help) usage ;;
+ "") usage ;;
+ *) IFACES=$AFF && AFF=all ;; # Backwards compat mode
+esac
+
+# append the interfaces listed to the string with spaces
+while [ "$#" -ne "0" ] ; do
+ IFACES+=" $1"
+ shift
+done
+
+# for now the user must specify interfaces
+if [ -z "$IFACES" ]; then
+ usage
+ exit 1
+fi
+
+# support functions
+
+disable_xps()
+{
+ VEC=$core
+ if [ $VEC -ge 32 ]
+ then
+ MASK_FILL=""
+ MASK_ZERO="00000000"
+ let "IDX = $VEC / 32"
+ for ((i=1; i<=$IDX;i++))
+ do
+ MASK_FILL="${MASK_FILL},${MASK_ZERO}"
+ done
+
+ let "VEC -= 32 * $IDX"
+ MASK_TMP=$((1<<$VEC))
+ MASK=$(printf "%X%s" $MASK_TMP $MASK_FILL)
+ else
+ MASK_TMP=$((1<<$VEC))
+ MASK=$(printf "%X" $MASK_TMP)
+ fi
+
+ MASK=0
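+	# note: the per-core mask computed above is intentionally discarded;
+	# writing an all-zero mask disables XPS for this tx queue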
+ printf "%s %d %s -> /sys/class/net/%s/queues/tx-%d/xps_cpus\n" $IFACE $core $MASK $IFACE $((n-1))
+ printf "%s" $MASK > /sys/class/net/$IFACE/queues/tx-$((n-1))/xps_cpus
+}
+
+# Allow usage of , or -
+#
+parse_range () {
+ RANGE=${@//,/ }
+ RANGE=${RANGE//-/..}
+ LIST=""
+ for r in $RANGE; do
+ # eval lets us use vars in {#..#} range
+ [[ $r =~ '..' ]] && r="$(eval echo {$r})"
+ LIST+=" $r"
+ done
+ echo $LIST
+}
+
+# Affinitize interrupts
+#
+setaff()
+{
+ CORES=$(parse_range $CORES)
+ ncores=$(echo $CORES | wc -w)
+ n=1
+
+ # this script only supports interrupt vectors in pairs,
+ # modification would be required to support a single Tx or Rx queue
+ # per interrupt vector
+
+ queues="${IFACE}-.*TxRx"
+
+ irqs=$(grep "$queues" /proc/interrupts | cut -f1 -d:)
+ [ -z "$irqs" ] && irqs=$(grep $IFACE /proc/interrupts | cut -f1 -d:)
+ [ -z "$irqs" ] && irqs=$(for i in `ls -Ux /sys/class/net/$IFACE/device/msi_irqs` ;\
+ do grep "$i:.*TxRx" /proc/interrupts | grep -v fdir | cut -f 1 -d : ;\
+ done)
+ [ -z "$irqs" ] && echo "Error: Could not find interrupts for $IFACE"
+
+ echo "IFACE CORE MASK -> FILE"
+ echo "======================="
+ for IRQ in $irqs; do
+ [ "$n" -gt "$ncores" ] && n=1
+ j=1
+ # much faster than calling cut for each
+ for i in $CORES; do
+ [ $((j++)) -ge $n ] && break
+ done
+ core=$i
+ disable_xps
+ ((n++))
+ done
+}
+
+# now the actual useful bits of code
+
+# these next 2 lines would allow script to auto-determine interfaces
+#[ -z "$IFACES" ] && IFACES=$(ls /sys/class/net)
+#[ -z "$IFACES" ] && echo "Error: No interfaces up" && exit 1
+
+# echo IFACES is $IFACES
+
+CORES=$(</sys/devices/system/cpu/online)
+[ "$CORES" ] || CORES=$(grep ^proc /proc/cpuinfo | cut -f2 -d:)
+
+# Core list for each node from sysfs
+node_dir=/sys/devices/system/node
+for i in $(ls -d $node_dir/node*); do
+ i=${i/*node/}
+ corelist[$i]=$(<$node_dir/node${i}/cpulist)
+done
+
+for IFACE in $IFACES; do
+ # echo $IFACE being modified
+
+ dev_dir=/sys/class/net/$IFACE/device
+ [ -e $dev_dir/numa_node ] && node=$(<$dev_dir/numa_node)
+ [ "$node" ] && [ "$node" -gt 0 ] || node=0
+
+ case "$AFF" in
+ local)
+ CORES=${corelist[$node]}
+ ;;
+ remote)
+ [ "$rnode" ] || { [ $node -eq 0 ] && rnode=1 || rnode=0; }
+ CORES=${corelist[$rnode]}
+ ;;
+ one)
+ [ -n "$cnt" ] || cnt=0
+ CORES=$cnt
+ ;;
+ all)
+ CORES=$CORES
+ ;;
+ custom)
+ echo -n "Input cores for $IFACE (ex. 0-7,15-23): "
+ read CORES
+ ;;
+ [0-9]*)
+ CORES=$AFF
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+
+ # call the worker function
+ setaff
+done
################################################################################
#
# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
-# Copyright(c) 2013 - 2016 Intel Corporation.
+# Copyright(c) 2013 - 2017 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# i40e does not support building on kernels older than 2.6.32
$(call minimum_kver_check,2,6,32)
-############################
-# Module Install Directory #
-############################
-
-# Default to using updates/drivers/net/ethernet/intel/ path, since depmod since
-# v3.1 defaults to checking updates folder first, and only checking kernels/
-# and extra afterwards. We use updates instead of kernel/* due to desire to
-# prevent over-writing built-in modules files.
-INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER}
-
-######################
-# Kernel Build Macro #
-######################
-
-# kernel build function
-# ${1} is the kernel build target
-# ${2] may contain any extra rules to pass directly to the sub-make process
-kernelbuild = ${MAKE} $(if ${GCC_I_SYS},CC:="${GCC_I_SYS}") \
- $(if ${EXTRA_CFLAGS},ccflags-y:="${EXTRA_CFLAGS}") \
- -C ${KSRC} \
- $(if ${KOBJ},O:=${KOBJ}) \
- CONFIG_${DRIVER_UPPERCASE}=m \
- M:=$(call readlink,.) \
- $(if ${INSTALL_MOD_PATH},INSTALL_MOD_PATH:=${INSTALL_MOD_PATH}) \
- INSTALL_MOD_DIR:=${INSTALL_MOD_DIR} \
- ${2} ${1};
-
###############
# Build rules #
###############
@echo "Copying manpages..."
@install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz
@echo "Installing modules..."
- @$(call kernelbuild,modules_install)
+ @+$(call kernelbuild,modules_install)
@echo "Running depmod..."
@$(call cmd_depmod)
ifeq (${cmd_initrd},)
-@$(call cmd_initrd)
endif
+# Target used by rpmbuild spec file
+rpm: default manfile
+ @install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz
+ @install -D -m 644 ${DRIVER}.ko ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko
+
uninstall:
rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko;
$(call cmd_depmod)
+ifeq (${cmd_initrd},)
+ @echo "Unable to update initrd. You may need to do this manually."
+else
+ @echo "Updating initrd..."
+ -@$(call cmd_initrd)
+endif
if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \
rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \
fi;
################################################################################
#
# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
-# Copyright(c) 2013 - 2016 Intel Corporation.
+# Copyright(c) 2013 - 2017 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
cmd_initrd := $(shell \
if which dracut > /dev/null 2>&1 ; then \
echo "dracut --force"; \
+ elif which update-initramfs > /dev/null 2>&1 ; then \
+ echo "update-initramfs -u"; \
fi )
#####################
# Kernel Search Path
# All the places we look for kernel source
-KSP := /lib/modules/${BUILD_KERNEL}/build \
- /lib/modules/${BUILD_KERNEL}/source \
+KSP := /lib/modules/${BUILD_KERNEL}/source \
+ /lib/modules/${BUILD_KERNEL}/build \
/usr/src/linux-${BUILD_KERNEL} \
/usr/src/linux-$(${BUILD_KERNEL} | sed 's/-.*//') \
/usr/src/kernel-headers-${BUILD_KERNEL} \
EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
endif
+# Determine SLE_LOCALVERSION_CODE for SuSE SLE >= 11 (needed by kcompat)
+# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string
+# appended to the stable kernel version on which their kernel is based with
+# additional versioning information (up to 3 numbers), a possible abbreviated
+# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default
+# or CONFIG_LOCALVERSION=-999.gdeadbee-default
+ifeq (1,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+ grep -m 1 CONFIG_SUSE_KERNEL | awk '{ print $$3 }'))
+
+ifneq (10,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+ grep -m 1 CONFIG_SLE_VERSION | awk '{ print $$3 }'))
+
+ LOCALVERSION := $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+ grep -m 1 CONFIG_LOCALVERSION | awk '{ print $$3 }' |\
+ cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//')
+ LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1)
+ LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2)
+ LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3)
+ SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \
+ 0${LOCALVER_B} \* 256 + 0${LOCALVER_C})
+ EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE}
+endif
+endif
+
EXTRA_CFLAGS += ${CFLAGS_EXTRA}
# get the kernel version - we use this to find the correct install path
# from within a Makefile recipe.
#
# The following variables are expected to be defined for its use:
-#
-# *) GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem
-# *) CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS
-# *) EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable
-# *) KSRC -- the location of the kernel source tree to build against
-# *) DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER
+# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem
+# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS
+# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable
+# KSRC -- the location of the kernel source tree to build against
+# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER
#
kernelbuild = ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \
${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \
-C "${KSRC}" \
CONFIG_${DRIVER_UPPERCASE}=m \
M="${CURDIR}" \
- ${2} ${1};
+ ${2} ${1}
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/hash.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include "i40e_dcb.h"
/* Useful i40e defaults */
-#define I40E_MAX_VEB 16
-
-#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
-#define I40E_DEFAULT_NUM_DESCRIPTORS 512
-#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
-#define I40E_MIN_NUM_DESCRIPTORS 64
-#define I40E_MIN_MSIX 2
-#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF, 16 VMDQ */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
-#define I40E_DEFAULT_QUEUES_PER_VF 4
-#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define i40e_pf_get_max_q_per_tc(pf) \
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
-#define I40E_FDIR_RING 0
-#define I40E_FDIR_RING_COUNT 32
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
+#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
+#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
-#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 256
-#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 256
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
/*
* If I40E_MAX_USER_PRIORITY is updated please also update
* I40E_CLIENT_MAX_USER_PRIORITY in i40e_client.h and i40evf_client.h
*/
-#define I40E_MAX_USER_PRIORITY 8
-#define I40E_DEFAULT_MSG_ENABLE 4
-#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
-#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
+#define I40E_DEFAULT_MSG_ENABLE 4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
/* Ethtool Private Flags */
#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
#endif
-#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
-#define I40E_OEM_VER_BUILD_MASK 0xffff
-#define I40E_OEM_VER_PATCH_MASK 0xff
-#define I40E_OEM_VER_BUILD_SHIFT 8
-#define I40E_OEM_VER_SHIFT 24
-#define I40E_PHY_DEBUG_PORT BIT(4)
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
-#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x40
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
-/* magic for getting defines into strings */
-#define STRINGIFY(foo) #foo
-#define XSTRINGIFY(bar) STRINGIFY(bar)
-
-#define I40E_RX_DESC(R, i) \
+#define I40E_RX_DESC(R, i) \
(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
-#define I40E_TX_DESC(R, i) \
+#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
-#define I40E_TX_CTXTDESC(R, i) \
+#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define I40E_TX_FDIRDESC(R, i) \
+#define I40E_TX_FDIRDESC(R, i) \
(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
/* default to trying for four seconds */
-#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
-
-/**
- * i40e_is_mac_710 - Return true if MAC is X710/XL710
- * @hw: ptr to the hardware info
- **/
-static inline bool i40e_is_mac_710(struct i40e_hw *hw)
-{
- if ((hw->mac.type == I40E_MAC_X710) ||
- (hw->mac.type == I40E_MAC_XL710))
- return true;
-
- return false;
-}
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/* driver state flags */
enum i40e_state_t {
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
-#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
-#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
u32 fd_id;
};
-#ifndef FLOW_TYPE_MASK
-#define FLOW_TYPE_MASK 0xFF
-#else
-#error FLOW_TYPE_MASK already defined elsewhere
-#endif
-
#define I40E_CLOUD_FIELD_OMAC 0x01
#define I40E_CLOUD_FIELD_IMAC 0x02
#define I40E_CLOUD_FIELD_IVLAN 0x04
#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
- I40E_CLOUD_FIELD_IVLAN)
+ I40E_CLOUD_FIELD_IVLAN)
#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
- I40E_CLOUD_FIELD_TEN_ID)
+ I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
- I40E_CLOUD_FIELD_IMAC | \
- I40E_CLOUD_FIELD_TEN_ID)
+ I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
- I40E_CLOUD_FIELD_IVLAN | \
- I40E_CLOUD_FIELD_TEN_ID)
+ I40E_CLOUD_FIELD_IVLAN | \
+ I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP
struct i40e_cloud_filter {
};
struct i40e_udp_port_config {
- __be16 index;
+ /* AdminQ command interface expects port number in Host byte order */
+ u16 port;
u8 type;
};
#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45)
#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46)
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
-#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
+#define I40E_FLAG_PHY_CONTROLS_LEDS BIT_ULL(48)
#define I40E_FLAG_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(49)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(50)
-
- /* tracks features that get auto disabled by errors */
- u64 auto_disable_flags;
+#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(51)
+#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(52)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(56)
+
+ /* flag to enable/disable vf base mode support */
+ bool vf_base_mode_only;
+ /* Tracks features that are disabled due to hw limitations.
+ * If a bit is set here, it means that the corresponding
+ * bit in the 'flags' field is cleared, i.e. that feature
+ * is disabled
+ */
+ u64 hw_disabled_flags;
#ifdef I40E_FCOE
struct i40e_fcoe fcoe;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
u16 lan_veb; /* initial relay, if exists */
-#define I40E_NO_VEB 0xffff
-#define I40E_NO_VSI 0xffff
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
u16 next_vsi; /* Next unallocated VSI - 0-based! */
struct i40e_vsi **vsi;
struct i40e_veb *veb[I40E_MAX_VEB];
*/
u16 dcbx_cap;
- u32 fcoe_hmc_filt_num;
- u32 fcoe_hmc_cntx_num;
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
#ifdef HAVE_PTP_1588_CLOCK
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
- unsigned long last_rx_ptp_check;
- spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 rx_hwtstamp_cleared;
+ u32 latch_event_flags;
+ spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
#endif /* HAVE_PTP_1588_CLOCK */
u16 phy_led_val;
};
+/**
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * @macaddr: the MAC Address as the base key
+ *
+ * Simply copies the address and returns it as a u64 for hashing
+ **/
+static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
+{
+ u64 key = 0;
+
+ ether_addr_copy((u8 *)&key, macaddr);
+ return key;
+}
+
+enum i40e_filter_state {
+ I40E_FILTER_INVALID = 0, /* Invalid state */
+ I40E_FILTER_NEW, /* New, not sent to FW yet */
+ I40E_FILTER_ACTIVE, /* Added to switch by FW */
+ I40E_FILTER_FAILED, /* Rejected by FW */
+ I40E_FILTER_REMOVE, /* To be removed */
+/* There is no 'removed' state; the filter struct is freed */
+};
struct i40e_mac_filter {
- struct list_head list;
+ struct hlist_node hlist;
u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1
s16 vlan;
- u8 counter; /* number of instances of this filter */
- bool is_vf; /* filter belongs to a VF */
- bool is_netdev; /* filter belongs to a netdev */
- bool changed; /* filter needs to be sync'd to the HW */
- bool is_laa; /* filter is a Locally Administered Address */
+ enum i40e_filter_state state;
};
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
+ u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
- u16 stats_idx; /* index of VEB parent */
+ u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
u16 flags;
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
- /* Per VSI lock to protect elements/list (MAC filter) */
- spinlock_t mac_filter_list_lock;
- struct list_head mac_filter_list;
+ /* Per VSI lock to protect elements/hash (MAC filter) */
+ spinlock_t mac_filter_hash_lock;
+ /* Fixed size hash table with 2^8 buckets for MAC filters */
+ DECLARE_HASHTABLE(mac_filter_hash, 8);
+ bool has_vlan_filter;
/* VSI stats */
#ifdef HAVE_NDO_GET_STATS64
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ u32 active_filters;
+ u32 promisc_threshold;
+
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
- u16 int_rate_limit; /* value in usecs */
+ u16 int_rate_limit; /* value in usecs */
- u16 rss_table_size; /* HW RSS table size */
- u16 rss_size; /* Allocated RSS queues */
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
+ u16 rss_table_size; /* HW RSS table size */
+ u16 rss_size; /* Allocated RSS queues */
+ u8 *rss_hkey_user; /* User configured hash keys */
+ u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
u16 rx_buf_len;
int base_vector;
bool irqs_ready;
- u16 seid; /* HW index of this VSI (absolute index) */
- u16 id; /* VSI number */
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
u16 uplink_seid;
- u16 base_queue; /* vsi's first queue in hw array */
- u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
- u16 req_queue_pairs; /* User requested queue pairs */
- u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */
/* TC BW limit max quanta within VSI */
u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
- struct i40e_pf *back; /* Backreference to associated PF */
- u16 idx; /* index in pf->vsi[] */
- u16 veb_idx; /* index of VEB parent */
- struct kobject *kobj; /* sysfs object */
- bool current_isup; /* Sync 'link up' logging */
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
+ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
bool block_tx_timeout;
/* VSI specific handlers */
#ifdef HAVE_IRQ_AFFINITY_HINT
cpumask_t affinity_mask;
#endif
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+ struct irq_affinity_notify affinity_notify;
+#endif
+
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
**/
static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
{
- u64 val = 0;
+ u64 val;
- val = (u64)i40e_read_rx_ctl(&pf->hw,
- I40E_PRTQF_FD_INSET(addr, 1)) << 32;
- val |= (u64)i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+ val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1));
+ val <<= 32;
+ val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
return val;
}
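The rewrite assembles the 64-bit value stepwise rather than OR-ing shifted casts; a standalone sketch of the same combine (hypothetical values, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit register halves the way the rewritten
 * i40e_read_fd_input_set does: high half first, shift, then add
 * the low half.
 */
static uint64_t combine_halves(uint32_t hi, uint32_t lo)
{
	uint64_t val = hi;

	val <<= 32;
	val += lo;
	return val;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)combine_halves(0xDEADBEEFu, 0x00C0FFEEu));
	return 0;
}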
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+/**
+ * i40e_find_vsi_by_type - Find and return the first VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ **/
+static inline struct i40e_vsi *
+i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct i40e_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
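Typical use is locating the Flow Director VSI; a hedged sketch (I40E_VSI_FDIR is the driver's existing VSI type enum value):

struct i40e_vsi *fdir_vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

if (!fdir_vsi)
	return;	/* no Flow Director VSI allocated on this PF */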
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
#ifdef HAVE_NDO_GET_STATS64
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
-
/**
* i40e_is_flex_filter - returns true if input filter is flex filter
* @input: pointer to fdir filter
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
+ const u8 *macaddr, s16 vlan);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
-void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
+ const u8 *macaddr, s16 vlan);
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
-struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
- struct i40e_vsi *start_vsi);
int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type);
int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi);
int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi);
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+void i40e_service_event_schedule(struct i40e_pf *pf);
+int i40e_vsi_start_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_quiesce_vsi(struct i40e_vsi *vsi);
void i40e_unquiesce_vsi(struct i40e_vsi *vsi);
void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf);
int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
#ifdef NETIF_F_HW_TC
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
-void i40e_free_adminq_asq(struct i40e_hw *hw)
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
-void i40e_free_adminq_arq(struct i40e_hw *hw)
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-i40e_status i40e_init_asq(struct i40e_hw *hw)
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_SUCCESS;
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-i40e_status i40e_init_arq(struct i40e_hw *hw)
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_SUCCESS;
*
* The main shutdown routine for the Admin Send Queue
**/
-i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_SUCCESS;
*
* The main shutdown routine for the Admin Receive Queue
**/
-i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_SUCCESS;
*
* returns the number of free desc
**/
-u16 i40e_clean_asq(struct i40e_hw *hw)
+static u16 i40e_clean_asq(struct i40e_hw *hw)
{
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
struct i40e_aqc_set_wol_filter {
__le16 filter_index;
#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
/* Get Wake Reason (0x0121) */
struct i40e_aqc_get_wake_reason_completion {
u8 reserved_1[2];
__le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
u8 reserved_2[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-struct i40e_aqc_set_wol_filter_data {
- u8 filter[128];
- u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC_MODE_MASK 0x1F
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0)
+#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1)
+#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
+#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
+#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
{
i40e_status status = I40E_SUCCESS;
- if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
- case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i+1], buf[i+2], buf[i+3],
- buf[i+4], buf[i+5], buf[i+6], buf[i+7],
- buf[i+8], buf[i+9], buf[i+10], buf[i+11],
- buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
- /* the most we could have left is 16 bytes, pad with zeros */
- if (i < len) {
- char d_buf[16];
- int j, i_sav;
-
- i_sav = i;
- memset(d_buf, 0, sizeof(d_buf));
- for (j = 0; i < len; j++, i++)
- d_buf[j] = buf[i];
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
- d_buf[4], d_buf[5], d_buf[6], d_buf[7],
- d_buf[8], d_buf[9], d_buf[10], d_buf[11],
- d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
- }
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
+ /* write whatever's left over without overrunning the buffer */
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
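The open-coded hex dump collapses into the kernel's %ph printk extension; a hedged sketch of the idiom (any u8 buffer; %*ph takes the byte count as the field width, capped at 64 bytes by printk):

u8 buf[20] = { 0xde, 0xad, 0xbe, 0xef };

/* fixed 16-byte group */
pr_debug("\t0x%04X %16ph\n", 0, buf);
/* variable-length tail: field width supplies the byte count */
pr_debug("\t0x%04X %*ph\n", 16, (int)(sizeof(buf) - 16), buf + 16);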
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
I40E_PTT_UNUSED_ENTRY(255)
};
-/**
- * i40e_validate_mac_addr - Validate unicast MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
-{
- i40e_status status = I40E_SUCCESS;
-
- /* Broadcast addresses ARE multicast addresses
- * Make sure it is not a multicast address
- * Reject the zero address
- */
- if (is_multicast_ether_addr(mac_addr) ||
- (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
- status = I40E_ERR_INVALID_MAC_ADDR;
-
- return status;
-}
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ ether_addr_copy(mac_addr, addrs.pf_lan_mac);
return status;
}
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ ether_addr_copy(mac_addr, addrs.port_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ ether_addr_copy(mac_addr, addrs.pf_san_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
return status;
}
-/**
- * i40e_aq_set_mac_config
- * @hw: pointer to the hw struct
- * @max_frame_size: Maximum Frame Size to be supported by the port
- * @crc_en: Tell HW to append a CRC to outgoing frames
- * @pacing: Pacing configurations
- * @cmd_details: pointer to command details structure or NULL
- *
- * Configure MAC settings for frame size, jumbo frame support and the
- * addition of a CRC by the hardware.
- **/
-i40e_status i40e_aq_set_mac_config(struct i40e_hw *hw,
- u16 max_frame_size,
- bool crc_en, u16 pacing,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_set_mac_config *cmd =
- (struct i40e_aq_set_mac_config *)&desc.params.raw;
- i40e_status status;
-
- if (max_frame_size == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_mac_config);
-
- cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
- cmd->params = ((u8)pacing & 0x0F) << 3;
- if (crc_en)
- cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
/**
* i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
return status;
}
-/**
- * i40e_aq_get_local_advt_reg
- * @hw: pointer to the hw struct
- * @advt_reg: local AN advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the Local AN advertisement register value.
- **/
-i40e_status i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
- u64 *advt_reg,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_an_advt_reg *resp =
- (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_local_advt_reg);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (status != I40E_SUCCESS)
- goto aq_get_local_advt_reg_exit;
-
- *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
- *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
-
-aq_get_local_advt_reg_exit:
- return status;
-}
-
-/**
- * i40e_aq_set_local_advt_reg
- * @hw: pointer to the hw struct
- * @advt_reg: local AN advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the Local AN advertisement register value.
- **/
-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_an_advt_reg *cmd =
- (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_local_advt_reg);
-
- cmd->local_an_reg0 = CPU_TO_LE32(lower_32_bits(advt_reg));
- cmd->local_an_reg1 = CPU_TO_LE16(upper_32_bits(advt_reg));
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_get_partner_advt
- * @hw: pointer to the hw struct
- * @advt_reg: AN partner advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the link partner AN advertisement register value.
- **/
-i40e_status i40e_aq_get_partner_advt(struct i40e_hw *hw,
- u64 *advt_reg,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_an_advt_reg *resp =
- (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_partner_advt);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (status != I40E_SUCCESS)
- goto aq_get_partner_advt_exit;
-
- *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
- *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
-
-aq_get_partner_advt_exit:
- return status;
-}
-
-/**
- * i40e_aq_set_lb_modes
- * @hw: pointer to the hw struct
- * @lb_modes: loopback mode to be set
- * @cmd_details: pointer to command details structure or NULL
- *
- * Sets loopback modes.
- **/
-i40e_status i40e_aq_set_lb_modes(struct i40e_hw *hw,
- u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_lb_mode *cmd =
- (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_lb_modes);
-
- cmd->lb_mode = CPU_TO_LE16(lb_modes);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
/**
* i40e_aq_set_phy_debug
* @hw: pointer to the hw struct
return status;
}
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
return status;
}
+/**
+ * i40e_aq_set_vsi_full_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: VSI number
+ * @set: set promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
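A hedged usage sketch; the error handling follows the driver's usual dev_info pattern:

i40e_status ret;

ret = i40e_aq_set_vsi_full_promiscuous(&pf->hw, vsi->seid, true, NULL);
if (ret)
	dev_info(&pf->pdev->dev,
		 "set full promiscuous failed on VSI %d, err %d\n",
		 vsi->seid, ret);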
/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
return status;
}
+/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
+
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
cmd->seid = CPU_TO_LE16(seid);
*api_major_version = LE16_TO_CPU(resp->api_major);
if (api_minor_version != NULL)
*api_minor_version = LE16_TO_CPU(resp->api_minor);
-
- /* A workaround to fix the API version in SW */
- if (api_major_version && api_minor_version &&
- fw_major_version && fw_minor_version &&
- ((*api_major_version == 1) && (*api_minor_version == 1)) &&
- (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
- (*fw_major_version > 4)))
- *api_minor_version = 2;
}
return status;
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
-
-/**
- * i40e_get_link_speed
- * @hw: pointer to the hw struct
- *
- * Returns the link speed of the adapter.
- **/
-enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
-{
- enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
- i40e_status status = I40E_SUCCESS;
-
- if (hw->phy.get_link_info) {
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
-
- if (status != I40E_SUCCESS)
- goto i40e_link_speed_exit;
- }
-
- speed = hw->phy.link_info.link_speed;
-
-i40e_link_speed_exit:
- return speed;
-}
-
/**
* i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
* @hw: pointer to the hw struct
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
**/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(*v_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = CPU_TO_LE16(count);
- cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
return status;
}
/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(*v_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = CPU_TO_LE16(count);
- cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: vf id to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cmd_details: pointer to command details
- *
- * send msg to vf
- **/
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pf_vf_message *cmd =
- (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
- cmd->id = CPU_TO_LE32(vfid);
- desc.cookie_high = CPU_TO_LE32(v_opcode);
- desc.cookie_low = CPU_TO_LE32(v_retval);
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
- if (msglen) {
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(msglen);
- }
- status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_debug_read_register
+ * i40e_aq_debug_read_register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
return status;
}
-/**
- * i40e_aq_add_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to add
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be added
- * @mib_len: length of the LLDP MIB returned in response
- * @cmd_details: pointer to command details structure or NULL
- *
- * Add the specified TLV to LLDP Local MIB for the given bridge type,
- * it is responsibility of the caller to make sure that the TLV is not
- * already present in the LLDPDU.
- * In return firmware will write the complete LLDP MIB with the newly
- * added TLV in the response buffer.
- **/
-i40e_status i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- i40e_status status;
-
- if (buff_size == 0 || !buff || tlv_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->len = CPU_TO_LE16(tlv_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_update_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to update
- * @buff_size: size of the buffer holding original and updated TLVs
- * @old_len: Length of the Original TLV
- * @new_len: Length of the Updated TLV
- * @offset: offset of the updated TLV in the buff
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Update the specified TLV to the LLDP Local MIB for the given bridge type.
- * Firmware will place the complete LLDP MIB in response buffer with the
- * updated TLV.
- **/
-i40e_status i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_tlv *cmd =
- (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
- i40e_status status;
-
- if (buff_size == 0 || !buff || offset == 0 ||
- old_len == 0 || new_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->old_len = CPU_TO_LE16(old_len);
- cmd->new_offset = CPU_TO_LE16(offset);
- cmd->new_len = CPU_TO_LE16(new_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_delete_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: pointer to a user supplied buffer that has the TLV
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be deleted
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Delete the specified TLV from LLDP Local MIB for the given bridge type.
- * The firmware places the entire LLDP MIB in the response buffer.
- **/
-i40e_status i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- i40e_status status;
-
- if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
- cmd->len = CPU_TO_LE16(tlv_len);
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
-
- return status;
-}
-
/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
}
/**
- * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * i40e_aq_dcb_updated - DCB Updated Command
* @hw: pointer to the hw struct
- * @flags: component flags
- * @mac_seid: uplink seid (MAC SEID)
- * @vsi_seid: connected vsi seid
- * @ret_seid: seid of create pv component
+ * @cmd_details: pointer to command details structure or NULL
*
- * This instantiates an i40e port virtualizer with specified flags.
- * Depending on specified flags the port virtualizer can act as a
- * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
- */
-i40e_status i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
- u16 mac_seid, u16 vsi_seid,
- u16 *ret_seid)
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_add_update_pv *cmd =
- (struct i40e_aqc_add_update_pv *)&desc.params.raw;
- struct i40e_aqc_add_update_pv_completion *resp =
- (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
i40e_status status;
- if (vsi_seid == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
- cmd->command_flags = CPU_TO_LE16(flags);
- cmd->uplink_seid = CPU_TO_LE16(mac_seid);
- cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
- if (!status && ret_seid)
- *ret_seid = LE16_TO_CPU(resp->pv_seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
- * i40e_aq_add_tag - Add an S/E-tag
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
* @hw: pointer to the hw struct
- * @direct_to_queue: should s-tag direct flow to a specific queue
- * @vsi_seid: VSI SEID to use this tag
- * @tag: value of the tag
- * @queue_num: queue number, only valid is direct_to_queue is true
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
*
- * This associates an S- or E-tag to a VSI in the switch complex. It returns
- * the number of tags allocated by the PF, and the number of unallocated
- * tags available.
+ * Generic command handler for Tx scheduler AQ commands
**/
-i40e_status i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
- u16 vsi_seid, u16 tag, u16 queue_num,
- u16 *tags_used, u16 *tags_free,
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_add_tag *cmd =
- (struct i40e_aqc_add_tag *)&desc.params.raw;
- struct i40e_aqc_add_remove_tag_completion *resp =
- (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
i40e_status status;
+ bool cmd_param_flag = false;
- if (vsi_seid == 0)
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- cmd->seid = CPU_TO_LE16(vsi_seid);
- cmd->tag = CPU_TO_LE16(tag);
- if (direct_to_queue) {
- cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
- cmd->queue_number = CPU_TO_LE16(queue_num);
- }
+ desc.datalen = CPU_TO_LE16(buff_size);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ cmd->vsi_seid = CPU_TO_LE16(seid);
- if (!status) {
- if (tags_used != NULL)
- *tags_used = LE16_TO_CPU(resp->tags_used);
- if (tags_free != NULL)
- *tags_free = LE16_TO_CPU(resp->tags_free);
- }
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
return status;
}
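The per-command wrappers funnel through this handler; for instance, the upstream driver's i40e_aq_config_vsi_tc_bw reduces to the following (sketch):

i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
			struct i40e_asq_cmd_details *cmd_details)
{
	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data,
				    sizeof(*bw_data),
				    i40e_aqc_opc_configure_vsi_tc_bw,
				    cmd_details);
}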
/**
- * i40e_aq_remove_tag - Remove an S- or E-tag
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
* @hw: pointer to the hw struct
- * @vsi_seid: VSI SEID this tag is associated with
- * @tag: value of the S-tag to delete
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
* @cmd_details: pointer to command details structure or NULL
- *
- * This deletes an S- or E-tag from a VSI in the switch complex. It returns
- * the number of tags allocated by the PF, and the number of unallocated
- * tags available.
**/
-i40e_status i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
- u16 tag, u16 *tags_used, u16 *tags_free,
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_remove_tag *cmd =
- (struct i40e_aqc_remove_tag *)&desc.params.raw;
- struct i40e_aqc_add_remove_tag_completion *resp =
- (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
i40e_status status;
- if (vsi_seid == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
- cmd->seid = CPU_TO_LE16(vsi_seid);
- cmd->tag = CPU_TO_LE16(tag);
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status) {
- if (tags_used != NULL)
- *tags_used = LE16_TO_CPU(resp->tags_used);
- if (tags_free != NULL)
- *tags_free = LE16_TO_CPU(resp->tags_free);
- }
-
return status;
}
/**
- * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
- * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
- * @etag: value of E-tag to add
- * @num_tags_in_buf: number of unicast E-tags in indirect buffer
- * @buf: address of indirect buffer
- * @tags_used: return value, number of E-tags in use by this port
- * @tags_free: return value, number of unallocated M-tags
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
- *
- * This associates a multicast E-tag to a port virtualizer. It will return
- * the number of tags allocated by the PF, and the number of unallocated
- * tags available.
- *
- * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
- * num_tags_in_buf long.
**/
-i40e_status i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
- u16 etag, u8 num_tags_in_buf, void *buf,
- u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_mcast_etag *cmd =
- (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
- struct i40e_aqc_add_remove_mcast_etag_completion *resp =
- (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
- i40e_status status;
- u16 length = sizeof(u16) * num_tags_in_buf;
-
- if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_add_multicast_etag);
-
- cmd->pv_seid = CPU_TO_LE16(pv_seid);
- cmd->etag = CPU_TO_LE16(etag);
- cmd->num_unicast_etags = num_tags_in_buf;
-
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
-
- if (!status) {
- if (tags_used != NULL)
- *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
- if (tags_free != NULL)
- *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
- * @hw: pointer to the hw struct
- * @pv_seid: Port Virtualizer SEID this M-tag is associated with
- * @etag: value of the E-tag to remove
- * @tags_used: return value, number of tags in use by this port
- * @tags_free: return value, number of unallocated tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This deletes an E-tag from the port virtualizer. It will return
- * the number of tags allocated by the port, and the number of unallocated
- * tags available.
- **/
-i40e_status i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
- u16 etag, u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_mcast_etag *cmd =
- (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
- struct i40e_aqc_add_remove_mcast_etag_completion *resp =
- (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
- i40e_status status;
-
-
- if (pv_seid == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_remove_multicast_etag);
-
- cmd->pv_seid = CPU_TO_LE16(pv_seid);
- cmd->etag = CPU_TO_LE16(etag);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (!status) {
- if (tags_used != NULL)
- *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
- if (tags_free != NULL)
- *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_update_tag - Update an S/E-tag
- * @hw: pointer to the hw struct
- * @vsi_seid: VSI SEID using this S-tag
- * @old_tag: old tag value
- * @new_tag: new tag value
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This updates the value of the tag currently attached to this VSI
- * in the switch complex. It will return the number of tags allocated
- * by the PF, and the number of unallocated tags available.
- **/
-i40e_status i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
- u16 old_tag, u16 new_tag, u16 *tags_used,
- u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_update_tag *cmd =
- (struct i40e_aqc_update_tag *)&desc.params.raw;
- struct i40e_aqc_update_tag_completion *resp =
- (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
- i40e_status status;
-
- if (vsi_seid == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
-
- cmd->seid = CPU_TO_LE16(vsi_seid);
- cmd->old_tag = CPU_TO_LE16(old_tag);
- cmd->new_tag = CPU_TO_LE16(new_tag);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (!status) {
- if (tags_used != NULL)
- *tags_used = LE16_TO_CPU(resp->tags_used);
- if (tags_free != NULL)
- *tags_free = LE16_TO_CPU(resp->tags_free);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
- * @hw: pointer to the hw struct
- * @tcmap: TC map for request/release any ignore PFC condition
- * @request: request or release ignore PFC condition
- * @tcmap_ret: return TCs for which PFC is currently ignored
- * @cmd_details: pointer to command details structure or NULL
- *
- * This sends out request/release to ignore PFC condition for a TC.
- * It will return the TCs for which PFC is currently ignored.
- **/
-i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
- bool request, u8 *tcmap_ret,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pfc_ignore *cmd_resp =
- (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
-
- if (request)
- cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
-
- cmd_resp->tc_bitmap = tcmap;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (!status) {
- if (tcmap_ret != NULL)
- *tcmap_ret = cmd_resp->tc_bitmap;
- }
-
- return status;
-}
-
-/**
- * i40e_aq_dcb_updated - DCB Updated Command
- * @hw: pointer to the hw struct
- * @cmd_details: pointer to command details structure or NULL
- *
- * When LLDP is handled in PF this command is used by the PF
- * to notify EMP that a DCB setting is modified.
- * When LLDP is handled in EMP this command is used by the PF
- * to notify EMP whenever one of the following parameters get
- * modified:
- * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
- * - PCIRTT in PRTDCB_GENC.PCIRTT
- * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
- * EMP will return when the shared RPB settings have been
- * recomputed and modified. The retval field in the descriptor
- * will be set to 0 when RPB is modified.
- **/
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
- * @hw: pointer to the hw struct
- * @seid: defines the SEID of the switch for which the stats are requested
- * @vlan_id: the VLAN ID for which the statistics are requested
- * @stat_index: index of the statistics counters block assigned to this VLAN
- * @cmd_details: pointer to command details structure or NULL
- *
- * XL710 supports 128 smonVlanStats counters.This command is used to
- * allocate a set of smonVlanStats counters to a specific VLAN in a specific
- * switch.
- **/
-i40e_status i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
- u16 vlan_id, u16 *stat_index,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_statistics *cmd_resp =
- (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
- i40e_status status;
-
- if ((seid == 0) || (stat_index == NULL))
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
-
- cmd_resp->seid = CPU_TO_LE16(seid);
- cmd_resp->vlan = CPU_TO_LE16(vlan_id);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (!status && stat_index)
- *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch.
- * @hw: pointer to the hw struct
- * @seid: defines the SEID of the switch for which the stats are requested
- * @vlan_id: the VLAN ID for which the statistics are requested
- * @stat_index: index of the statistics counters block assigned to this VLAN
- * @cmd_details: pointer to command details structure or NULL
- *
- * XL710 supports 128 smonVlanStats counters.This command is used to
- * deallocate a set of smonVlanStats counters to a specific VLAN in a specific
- * switch.
- **/
-i40e_status i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
- u16 vlan_id, u16 stat_index,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_statistics *cmd =
- (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
- i40e_status status;
-
- if (seid == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_remove_statistics);
-
- cmd->seid = CPU_TO_LE16(seid);
- cmd->vlan = CPU_TO_LE16(vlan_id);
- cmd->stat_index = CPU_TO_LE16(stat_index);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_set_port_parameters - set physical port parameters.
- * @hw: pointer to the hw struct
- * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
- * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
- * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
- * @double_vlan: if set double VLAN is enabled
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_set_port_parameters(struct i40e_hw *hw,
- u16 bad_frame_vsi, bool save_bad_pac,
- bool pad_short_pac, bool double_vlan,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_set_port_parameters *cmd;
- i40e_status status;
- struct i40e_aq_desc desc;
- u16 command_flags = 0;
-
- cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_port_parameters);
-
- cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
- if (save_bad_pac)
- command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
- if (pad_short_pac)
- command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
- if (double_vlan)
- command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
- cmd->command_flags = CPU_TO_LE16(command_flags);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
- * @hw: pointer to the hw struct
- * @seid: seid for the physical port/switching component/vsi
- * @buff: Indirect buffer to hold data parameters and response
- * @buff_size: Indirect buffer size
- * @opcode: Tx scheduler AQ command opcode
- * @cmd_details: pointer to command details structure or NULL
- *
- * Generic command handler for Tx scheduler AQ commands
- **/
-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
- void *buff, u16 buff_size,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_tx_sched_ind *cmd =
- (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
- i40e_status status;
- bool cmd_param_flag = false;
-
- switch (opcode) {
- case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
- case i40e_aqc_opc_configure_vsi_tc_bw:
- case i40e_aqc_opc_enable_switching_comp_ets:
- case i40e_aqc_opc_modify_switching_comp_ets:
- case i40e_aqc_opc_disable_switching_comp_ets:
- case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
- case i40e_aqc_opc_configure_switching_comp_bw_config:
- cmd_param_flag = true;
- break;
- case i40e_aqc_opc_query_vsi_bw_config:
- case i40e_aqc_opc_query_vsi_ets_sla_config:
- case i40e_aqc_opc_query_switching_comp_ets_config:
- case i40e_aqc_opc_query_port_ets_config:
- case i40e_aqc_opc_query_switching_comp_bw_config:
- cmd_param_flag = false;
- break;
- default:
- return I40E_ERR_PARAM;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
-
- /* Indirect command */
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
- if (cmd_param_flag)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->vsi_seid = CPU_TO_LE16(seid);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
- * @hw: pointer to the hw struct
- * @seid: VSI seid
- * @credit: BW limit credits (0 = disabled)
- * @max_credit: Max BW limit credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_credit,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_vsi_bw_limit *cmd =
- (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_configure_vsi_bw_limit);
-
- cmd->vsi_seid = CPU_TO_LE16(seid);
- cmd->credit = CPU_TO_LE16(credit);
- cmd->max_credit = max_credit;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
- * @hw: pointer to the hw struct
- * @seid: switching component seid
- * @credit: BW limit credits (0 = disabled)
- * @max_bw: Max BW limit credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
- (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_configure_switching_comp_bw_limit);
-
- cmd->seid = CPU_TO_LE16(seid);
- cmd->credit = CPU_TO_LE16(credit);
- cmd->max_bw = max_bw;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
- * @hw: pointer to the hw struct
- * @seid: VSI seid
- * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
-{
- return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
- cmd_details);
-}
-
-/**
- * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
- * @hw: pointer to the hw struct
- * @seid: VSI seid
- * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_configure_vsi_tc_bw,
cmd_details);
}
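
A hedged sketch of the per-TC variant; the 50/50 split across TC0 and TC1 and the helper name are assumptions for illustration, while the field names follow struct i40e_aqc_configure_vsi_tc_bw_data:

    /* Sketch: give TC0 and TC1 equal relative shares of the VSI's
     * bandwidth. tc_bw_credits holds relative weights, not absolute
     * rates.
     */
    static i40e_status example_split_tc_bw(struct i40e_hw *hw, u16 vsi_seid)
    {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};

        bw_data.tc_valid_bits = 0x3;    /* TC0 | TC1 */
        bw_data.tc_bw_credits[0] = 50;
        bw_data.tc_bw_credits[1] = 50;

        return i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw_data, NULL);
    }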
-/**
- * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
- * @hw: pointer to the hw struct
- * @seid: seid of the switching component
- * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
- struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
-{
- return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
- cmd_details);
-}
-
/**
* i40e_aq_query_vsi_bw_config - Query VSI BW configuration
* @hw: pointer to the hw struct
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
- I40E_NONDMA_TO_NONDMA);
+ ether_addr_copy(cmd->mac, mac_addr);
cmd->etype = CPU_TO_LE16(ethtype);
cmd->flags = CPU_TO_LE16(flags);
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(f[i].flags) &
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
- ti = le32_to_cpu(f[i].tenant_id);
- f[i].tenant_id = cpu_to_le32(ti << 8);
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
}
}
}
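
The fix-up above exists because firmware expects the 24-bit GENEVE VNI in the upper three bytes of the little-endian tenant_id field, while the stack supplies it in the low 24 bits. A worked example of the shift, with an illustrative VNI value:

    u32 ti = 0x00123456;    /* VNI from the stack, low 24 bits */
    u32 wire = ti << 8;     /* 0x12345600: VNI in the upper three bytes */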
i40e_fix_up_geneve_vni(filters, filter_count);
- status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_cloud_filters
- * @hw: pointer to the hardware structure
- * @seid: VSI seid to remove cloud filters from
- * @filters: Buffer which contains the filters to be removed
- * @filter_count: number of filters contained in the buffer
- *
- * Remove the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_remove_cloud_filters_element_data are filled
- * in by the caller of the function.
- *
- **/
-i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
- u16 buff_len;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_remove_cloud_filters);
-
- buff_len = filter_count * sizeof(*filters);
- desc.datalen = CPU_TO_LE16(buff_len);
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- cmd->num_filters = filter_count;
- cmd->seid = CPU_TO_LE16(seid);
-
- i40e_fix_up_geneve_vni(filters, filter_count);
-
- status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
-
- return status;
-}
-
-/**
- * i40e_aq_alternate_write
- * @hw: pointer to the hardware structure
- * @reg_addr0: address of first dword to be read
- * @reg_val0: value to be written under 'reg_addr0'
- * @reg_addr1: address of second dword to be read
- * @reg_val1: value to be written under 'reg_addr1'
- *
- * Write one or two dwords to alternate structure. Fields are indicated
- * by 'reg_addr0' and 'reg_addr1' register numbers.
- *
- **/
-i40e_status i40e_aq_alternate_write(struct i40e_hw *hw,
- u32 reg_addr0, u32 reg_val0,
- u32 reg_addr1, u32 reg_val1)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write *cmd_resp =
- (struct i40e_aqc_alternate_write *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
- cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
- cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
- cmd_resp->data0 = CPU_TO_LE32(reg_val0);
- cmd_resp->data1 = CPU_TO_LE32(reg_val1);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
/**
- * i40e_aq_alternate_write_indirect
+ * i40e_aq_remove_cloud_filters
* @hw: pointer to the hardware structure
- * @addr: address of a first register to be modified
- * @dw_count: number of alternate structure fields to write
- * @buffer: pointer to the command buffer
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
*
- * Write 'dw_count' dwords from 'buffer' to alternate structure
- * starting at 'addr'.
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
*
**/
-i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
- u32 addr, u32 dw_count, void *buffer)
+i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_ind_write *cmd_resp =
- (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
i40e_status status;
+ u16 buff_len;
- if (buffer == NULL)
- return I40E_ERR_PARAM;
-
- /* Indirect command */
i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_alternate_write_indirect);
+ i40e_aqc_opc_remove_cloud_filters);
- desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
- desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
- if (dw_count > (I40E_AQ_LARGE_BUF/4))
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
- cmd_resp->address = CPU_TO_LE32(addr);
- cmd_resp->length = CPU_TO_LE32(dw_count);
+ i40e_fix_up_geneve_vni(filters, filter_count);
- status = i40e_asq_send_command(hw, &desc, buffer,
- lower_32_bits(4*dw_count), NULL);
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
return status;
}
-/**
- * i40e_aq_alternate_read_indirect
- * @hw: pointer to the hardware structure
- * @addr: address of the alternate structure field
- * @dw_count: number of alternate structure fields to read
- * @buffer: pointer to the command buffer
- *
- * Read 'dw_count' dwords from alternate structure starting at 'addr' and
- * place them in 'buffer'. The buffer should be allocated by caller.
- *
- **/
-i40e_status i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
- u32 addr, u32 dw_count, void *buffer)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_ind_write *cmd_resp =
- (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
- i40e_status status;
-
- if (buffer == NULL)
- return I40E_ERR_PARAM;
-
- /* Indirect command */
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_alternate_read_indirect);
-
- desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
- desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
- if (dw_count > (I40E_AQ_LARGE_BUF/4))
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
- cmd_resp->address = CPU_TO_LE32(addr);
- cmd_resp->length = CPU_TO_LE32(dw_count);
-
- status = i40e_asq_send_command(hw, &desc, buffer,
- lower_32_bits(4*dw_count), NULL);
-
- return status;
-}
-
-/**
- * i40e_aq_alternate_clear
- * @hw: pointer to the HW structure.
- *
- * Clear the alternate structures of the port from which the function
- * is called.
- *
- **/
-i40e_status i40e_aq_alternate_clear(struct i40e_hw *hw)
-{
- struct i40e_aq_desc desc;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_alternate_clear_port);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
-
-/**
- * i40e_aq_alternate_write_done
- * @hw: pointer to the HW structure.
- * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
- * @reset_needed: indicates the SW should trigger GLOBAL reset
- *
- * Indicates to the FW that alternate structures have been changed.
- *
- **/
-i40e_status i40e_aq_alternate_write_done(struct i40e_hw *hw,
- u8 bios_mode, bool *reset_needed)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write_done *cmd =
- (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
- i40e_status status;
-
- if (reset_needed == NULL)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_alternate_write_done);
-
- cmd->cmd_flags = CPU_TO_LE16(bios_mode);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
- if (!status && reset_needed)
- *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
- I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
-
- return status;
-}
-
-/**
- * i40e_aq_set_oem_mode
- * @hw: pointer to the HW structure.
- * @oem_mode: the OEM mode to be used
- *
- * Sets the device to a specific operating mode. Currently the only supported
- * mode is no_clp, which causes FW to refrain from using Alternate RAM.
- *
- **/
-i40e_status i40e_aq_set_oem_mode(struct i40e_hw *hw,
- u8 oem_mode)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write_done *cmd =
- (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_alternate_set_mode);
-
- cmd->cmd_flags = CPU_TO_LE16(oem_mode);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
-
/**
* i40e_aq_resume_port_tx
* @hw: pointer to the hardware structure
{
hw->bus.type = i40e_bus_type_pci_express;
- switch (link_status & I40E_PCI_LINK_WIDTH) {
- case I40E_PCI_LINK_WIDTH_1:
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
hw->bus.width = i40e_bus_width_pcie_x1;
break;
- case I40E_PCI_LINK_WIDTH_2:
+ case PCI_EXP_LNKSTA_NLW_X2:
hw->bus.width = i40e_bus_width_pcie_x2;
break;
- case I40E_PCI_LINK_WIDTH_4:
+ case PCI_EXP_LNKSTA_NLW_X4:
hw->bus.width = i40e_bus_width_pcie_x4;
break;
- case I40E_PCI_LINK_WIDTH_8:
+ case PCI_EXP_LNKSTA_NLW_X8:
hw->bus.width = i40e_bus_width_pcie_x8;
break;
default:
break;
}
- switch (link_status & I40E_PCI_LINK_SPEED) {
- case I40E_PCI_LINK_SPEED_2500:
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
hw->bus.speed = i40e_bus_speed_2500;
break;
- case I40E_PCI_LINK_SPEED_5000:
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
hw->bus.speed = i40e_bus_speed_5000;
break;
- case I40E_PCI_LINK_SPEED_8000:
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
hw->bus.speed = i40e_bus_speed_8000;
break;
default:
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
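A minimal usage sketch for the clause-22 read, assuming an initialized struct i40e_hw and an external PHY at MDIO address 0; the BMSR register number and the helper name are illustrative:

    /* Sketch: read the clause-22 status register (BMSR, register 1)
     * from the PHY at MDIO address 0 and log the result.
     */
    static void example_read_bmsr(struct i40e_hw *hw)
    {
        u16 bmsr = 0;

        if (!i40e_read_phy_register_clause22(hw, 0x1, 0, &bmsr))
            i40e_debug(hw, I40E_DEBUG_PHY, "BMSR = 0x%04x\n", bmsr);
    }
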
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
return status;
}
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
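The two wrappers above select clause-22 or clause-45 framing from hw->device_id, so callers no longer hard-code the MDIO access type. A hedged read-modify-write sketch; the register offset and bit are illustrative, and the page argument is ignored for clause-22 devices:

    static i40e_status example_rmw_phy(struct i40e_hw *hw, u8 phy_addr)
    {
        i40e_status status;
        u16 val;

        status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
                                        0x10, phy_addr, &val);
        if (status)
            return status;

        val |= BIT(0);    /* illustrative bit to set */
        return i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
                                       0x10, phy_addr, val);
    }
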
/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
msleep(interval);
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(upper_32_bits((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(lower_32_bits((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(upper_32_bits((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(lower_32_bits((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)filter));
cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)filter));
return status;
}
+/**
+ * i40e_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all WoL filters configured on this PF
+ **/
+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
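Usage is a single direct AQ call; a sketch with an assumed helper name, passing NULL cmd_details for default command handling:

    /* Sketch: drop every WoL filter on this PF, e.g. before
     * reprogramming wake-up behavior from scratch.
     */
    static void example_reset_wol(struct i40e_hw *hw)
    {
        if (i40e_aq_clear_all_wol_filters(hw, NULL))
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                       "clearing WoL filters failed\n");
    }
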
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
return ret;
}
-/**
- * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
- * @tlv: Fill the ETS config data in IEEE format
- * @dcbcfg: Local store which holds the DCB Config
- *
- * Prepare IEEE 802.1Qaz ETS CFG TLV
- **/
-static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u8 priority0, priority1, maxtcwilling = 0;
- struct i40e_dcb_ets_config *etscfg;
- u16 offset = 0, typelength, i;
- u8 *buf = tlv->tlvinfo;
- u32 ouisubtype;
-
- typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
- I40E_IEEE_ETS_TLV_LENGTH);
- tlv->typelength = htons(typelength);
-
- ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
- I40E_IEEE_SUBTYPE_ETS_CFG);
- tlv->ouisubtype = I40E_HTONL(ouisubtype);
-
- /* First Octet post subtype
- * --------------------------
- * |will-|CBS | Re- | Max |
- * |ing | |served| TCs |
- * --------------------------
- * |1bit | 1bit|3 bits|3bits|
- */
- etscfg = &dcbcfg->etscfg;
- if (etscfg->willing)
- maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
- maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
- buf[offset] = maxtcwilling;
-
- /* Move offset to Priority Assignment Table */
- offset++;
-
- /* Priority Assignment Table (4 octets)
- * Octets:| 1 | 2 | 3 | 4 |
- * -----------------------------------------
- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
- * -----------------------------------------
- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
- * -----------------------------------------
- */
- for (i = 0; i < 4; i++) {
- priority0 = etscfg->prioritytable[i * 2] & 0xF;
- priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
- buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
- priority1;
- offset++;
- }
-
- /* TC Bandwidth Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- buf[offset++] = etscfg->tcbwtable[i];
-
- /* TSA Assignment Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- buf[offset++] = etscfg->tsatable[i];
-}
-
-/**
- * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
- * @tlv: Fill ETS Recommended TLV in IEEE format
- * @dcbcfg: Local store which holds the DCB Config
- *
- * Prepare IEEE 802.1Qaz ETS REC TLV
- **/
-static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- struct i40e_dcb_ets_config *etsrec;
- u16 offset = 0, typelength, i;
- u8 priority0, priority1;
- u8 *buf = tlv->tlvinfo;
- u32 ouisubtype;
-
- typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
- I40E_IEEE_ETS_TLV_LENGTH);
- tlv->typelength = htons(typelength);
-
- ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
- I40E_IEEE_SUBTYPE_ETS_REC);
- tlv->ouisubtype = I40E_HTONL(ouisubtype);
-
- etsrec = &dcbcfg->etsrec;
- /* First Octet is reserved */
- /* Move offset to Priority Assignment Table */
- offset++;
-
- /* Priority Assignment Table (4 octets)
- * Octets:| 1 | 2 | 3 | 4 |
- * -----------------------------------------
- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
- * -----------------------------------------
- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
- * -----------------------------------------
- */
- for (i = 0; i < 4; i++) {
- priority0 = etsrec->prioritytable[i * 2] & 0xF;
- priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
- buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
- priority1;
- offset++;
- }
-
- /* TC Bandwidth Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- buf[offset++] = etsrec->tcbwtable[i];
-
- /* TSA Assignment Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- buf[offset++] = etsrec->tsatable[i];
-}
-
- /**
- * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
- * @tlv: Fill PFC TLV in IEEE format
- * @dcbcfg: Local store to get PFC CFG data
- *
- * Prepare IEEE 802.1Qaz PFC CFG TLV
- **/
-static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u8 *buf = tlv->tlvinfo;
- u32 ouisubtype;
- u16 typelength;
-
- typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
- I40E_IEEE_PFC_TLV_LENGTH);
- tlv->typelength = htons(typelength);
-
- ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
- I40E_IEEE_SUBTYPE_PFC_CFG);
- tlv->ouisubtype = I40E_HTONL(ouisubtype);
-
- /* ----------------------------------------
- * |will-|MBC | Re- | PFC | PFC Enable |
- * |ing | |served| cap | |
- * -----------------------------------------
- * |1bit | 1bit|2 bits|4bits| 1 octet |
- */
- if (dcbcfg->pfc.willing)
- buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
-
- if (dcbcfg->pfc.mbc)
- buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
-
- buf[0] |= dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcenable;
-}
-
-/**
- * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
- * @tlv: Fill APP TLV in IEEE format
- * @dcbcfg: Local store to get APP CFG data
- *
- * Prepare IEEE 802.1Qaz APP CFG TLV
- **/
-static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u16 typelength, length, offset = 0;
- u8 priority, selector, i = 0;
- u8 *buf = tlv->tlvinfo;
- u32 ouisubtype;
-
- /* No APP TLVs then just return */
- if (dcbcfg->numapps == 0)
- return;
- ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
- I40E_IEEE_SUBTYPE_APP_PRI);
- tlv->ouisubtype = I40E_HTONL(ouisubtype);
-
- /* Move offset to App Priority Table */
- offset++;
- /* Application Priority Table (3 octets)
- * Octets:| 1 | 2 | 3 |
- * -----------------------------------------
- * |Priority|Rsrvd| Sel | Protocol ID |
- * -----------------------------------------
- * Bits:|23 21|20 19|18 16|15 0|
- * -----------------------------------------
- */
- while (i < dcbcfg->numapps) {
- priority = dcbcfg->app[i].priority & 0x7;
- selector = dcbcfg->app[i].selector & 0x7;
- buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
- buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
- buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
- /* Move to next app */
- offset += 3;
- i++;
- if (i >= I40E_DCBX_MAX_APPS)
- break;
- }
- /* length includes size of ouisubtype + 1 reserved + 3*numapps */
- length = sizeof(tlv->ouisubtype) + 1 + (i*3);
- typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
- (length & 0x1FF));
- tlv->typelength = htons(typelength);
-}
-
- /**
- * i40e_add_dcb_tlv - Add all IEEE TLVs
- * @tlv: pointer to org tlv
- *
- * add tlv information
- **/
-static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg,
- u16 tlvid)
-{
- switch (tlvid) {
- case I40E_IEEE_TLV_ID_ETS_CFG:
- i40e_add_ieee_ets_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_TLV_ID_ETS_REC:
- i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_TLV_ID_PFC_CFG:
- i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_TLV_ID_APP_PRI:
- i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
- break;
- default:
- break;
- }
-}
-
- /**
- * i40e_set_dcb_config - Set the local LLDP MIB to FW
- * @hw: pointer to the hw struct
- *
- * Set DCB configuration to the Firmware
- **/
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
-{
- i40e_status ret = I40E_SUCCESS;
- struct i40e_dcbx_config *dcbcfg;
- struct i40e_virt_mem mem;
- u8 mib_type, *lldpmib;
- u16 miblen;
-
- /* update the hw local config */
- dcbcfg = &hw->local_dcbx_config;
- /* Allocate the LLDPDU */
- ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
- if (ret)
- return ret;
-
- mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
- if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
- mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
- SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
- }
- lldpmib = (u8 *)mem.va;
- ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
- ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
-
- i40e_free_virt_mem(hw, &mem);
- return ret;
-}
-
-/**
- * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
- * @hw: pointer to the hw struct
- * @dcbcfg: store for LLDPDU data
- *
- * send DCB configuration to FW
- **/
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg)
-{
- u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
- i40e_status ret = I40E_SUCCESS;
- struct i40e_lldp_org_tlv *tlv;
- u16 typelength;
-
- tlv = (struct i40e_lldp_org_tlv *)lldpmib;
- while (1) {
- i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
- typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- if (length)
- offset += length + 2;
- /* END TLV or beyond LLDPDU size */
- if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
- (offset > I40E_LLDPDU_SIZE))
- break;
- /* Move to next TLV */
- if (length)
- tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
- sizeof(tlv->typelength) + length);
- }
- *miblen = offset;
- return ret;
-}
-
/**
* i40e_read_lldp_cfg - read LLDP Configuration data from NVM
* @hw: pointer to the HW structure
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include "i40e_type.h"
-#define I40E_DCBX_OFFLOAD_DISABLED 0
-#define I40E_DCBX_OFFLOAD_ENABLED 1
-
#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
-#define I40E_LLDP_ADMINSTATUS_DISABLED 0
-#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
-#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
-#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
-
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
-#define I40E_IEEE_TSA_CBS 1
#define I40E_IEEE_TSA_ETS 2
-#define I40E_IEEE_TSA_VENDOR 255
/* Defines for IEEE PFC TLV */
#define I40E_IEEE_PFC_CAP_SHIFT 0
#pragma pack(1)
-/* IEEE 802.1AB LLDP TLV structure */
-struct i40e_lldp_generic_tlv {
- __be16 typelength;
- u8 tlvinfo[1];
-};
-
/* IEEE 802.1AB LLDP Organization specific TLV */
struct i40e_lldp_org_tlv {
__be16 typelength;
};
#pragma pack()
-/*
- * TODO: The below structures related LLDP/DCBX variables
- * and statistics are defined but need to find how to get
- * the required information from the Firmware to use them
- */
-
-/* IEEE 802.1AB LLDP Agent Statistics */
-struct i40e_lldp_stats {
- u64 remtablelastchangetime;
- u64 remtableinserts;
- u64 remtabledeletes;
- u64 remtabledrops;
- u64 remtableageouts;
- u64 txframestotal;
- u64 rxframesdiscarded;
- u64 rxportframeerrors;
- u64 rxportframestotal;
- u64 rxporttlvsdiscardedtotal;
- u64 rxporttlvsunrecognizedtotal;
- u64 remtoomanyneighbors;
-};
-
-/* IEEE 802.1Qaz DCBX variables */
-struct i40e_dcbx_variables {
- u32 defmaxtrafficclasses;
- u32 defprioritytcmapping;
- u32 deftcbandwidth;
- u32 deftsaassignment;
-};
-
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
u16 *status);
i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
i40e_status i40e_init_dcb(struct i40e_hw *hw);
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* Delete given APP from all the VSIs for given PF
**/
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
- struct i40e_dcb_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
int v, err;
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
return len;
}
+static const char *i40e_filter_state_string[] = {
+ "INVALID",
+ "NEW",
+ "ACTIVE",
+ "FAILED",
+ "REMOVE",
+};
+
+/**
+ * i40e_dbg_dump_vsi_filters - handles dump of mac/vlan filters for a VSI
+ * @pf: the i40e_pf created in command write
+ * @vsi: the vsi to dump
+ */
+static void i40e_dbg_dump_vsi_filters(struct i40e_pf *pf, struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int bkt;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ dev_info(&pf->pdev->dev,
+ " mac_filter_hash: %pM vid=%d, state %s\n",
+ f->macaddr, f->vlan,
+ i40e_filter_state_string[f->state]);
+ }
+ dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
+ vsi->active_filters, vsi->promisc_threshold,
+ (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+ "ON" : "OFF"));
+}
+
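The string table above is indexed directly by f->state, so its order must track enum i40e_filter_state. A bounds-guarded lookup is the safer pattern; a sketch with an illustrative helper name:

    static const char *example_filter_state_str(int state)
    {
        if (state < 0 || state >= ARRAY_SIZE(i40e_filter_state_string))
            return "UNKNOWN";
        return i40e_filter_state_string[state];
    }
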
+/**
+ * i40e_dbg_dump_all_vsi_filters - dump mac/vlan filters for all VSI on a PF
+ * @pf: the i40e_pf created in command write
+ */
+static void i40e_dbg_dump_all_vsi_filters(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i]) {
+ dev_info(&pf->pdev->dev, "vsi seid %d\n",
+ pf->vsi[i]->seid);
+ i40e_dbg_dump_vsi_filters(pf, pf->vsi[i]);
+ }
+}
+
/**
* i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
#else
struct net_device_stats *nstat;
#endif
- struct i40e_mac_filter *f;
struct i40e_vsi *vsi;
int i;
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->vlgrp);
#else
- dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
#endif /* HAVE_VLAN_RX_REGISTER */
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
pf->hw.mac.addr,
pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- dev_info(&pf->pdev->dev,
- " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
- f->macaddr, f->vlan, f->is_netdev, f->is_vf,
- f->counter);
- }
+ i40e_dbg_dump_vsi_filters(pf, vsi);
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_buf_len = %d, dtype = %d\n",
- i,
- rx_ring->rx_buf_len,
- 0);
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
rx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_itr_setting = %d (%s)\n",
+ i, rx_ring->rx_itr_setting,
+ ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ?
+ "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
i, tx_ring->state,
tx_ring->queue_index,
tx_ring->reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, 0);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
dev_info(&pf->pdev->dev,
" tx_rings[%i]: DCB tc = %d\n",
i, tx_ring->dcb_tc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_itr_setting = %d (%s)\n",
+ i, tx_ring->tx_itr_setting,
+ ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ?
+ "dynamic" : "fixed");
}
rcu_read_unlock();
dev_info(&pf->pdev->dev,
- " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
- vsi->work_limit, vsi->rx_itr_setting,
- ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
- vsi->tx_itr_setting,
- ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+ " work_limit = %d\n",
+ vsi->work_limit);
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
-
- } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- struct i40e_mac_filter *f;
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "add macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (f && !ret)
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d added to VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
- ma, vlan, vsi_seid, f, ret);
-
- } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "del macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "del macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (!ret)
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d removed from VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
- ma, vlan, vsi_seid, ret);
-
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
i40e_status ret;
u16 vid;
int v;
- cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ cnt = sscanf(&cmd_buf[8], "%i %d", &vsi_seid, &v);
if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add pvid: bad command string, cnt=%d\n", cnt);
struct i40e_dcbx_config *d_cfg =
&pf->hw.desired_dcbx_config;
int i, ret;
+ u32 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
goto command_write_done;
}
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+
ret = i40e_aq_query_port_ets_config(&pf->hw,
- pf->mac_seid,
+ switch_id,
bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
cloud_node) {
i40e_dbg_dump_cloud_filter(pf, c_rule);
}
+ i40e_dbg_dump_all_vsi_filters(pf);
} else {
dev_info(&pf->pdev->dev,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
buff = NULL;
kfree(desc);
desc = NULL;
- } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
- (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_filter fd_data;
- u16 packet_len, i, j = 0;
- char *asc_packet;
- u8 *raw_packet;
- bool add = false;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
-
- if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
- if (!asc_packet)
- goto command_write_done;
-
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
-
- if (!raw_packet) {
- kfree(asc_packet);
- asc_packet = NULL;
- goto command_write_done;
- }
-
- cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
- &fd_data.q_index,
- &fd_data.flex_off, &fd_data.pctype,
- &fd_data.dest_vsi, &fd_data.dest_ctl,
- &fd_data.fd_status, &fd_data.cnt_index,
- &fd_data.fd_id, &packet_len, asc_packet);
- if (cnt != 10) {
- dev_info(&pf->pdev->dev,
- "program fd_filter: bad command string, cnt=%d\n",
- cnt);
- kfree(asc_packet);
- asc_packet = NULL;
- kfree(raw_packet);
- goto command_write_done;
- }
-
- /* fix packet length if user entered 0 */
- if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
-
- /* make sure to check the max as well */
- packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
- printk("packet in ascii %s\n", asc_packet);
-
- for (i = 0; i < packet_len; i++) {
- cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
- if (!cnt)
- break;
- j += 3;
- }
- dev_info(&pf->pdev->dev, "FD raw packet dump\n");
- print_hex_dump(KERN_INFO, "FD raw packet: ",
- DUMP_PREFIX_OFFSET, 16, 1,
- raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
- if (!ret) {
- dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
- } else {
- dev_info(&pf->pdev->dev,
- "Filter command send failed %d\n", ret);
- }
- kfree(raw_packet);
- raw_packet = NULL;
- kfree(asc_packet);
- asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
+	/* The vf base mode on/off hooks are meant for validation use only,
+	 * to make sure the VF base mode driver is not broken
+	 */
+ } else if (strncmp(cmd_buf, "vf base mode on", 15) == 0) {
+ if (!pf->num_alloc_vfs) {
+ pf->vf_base_mode_only = true;
+ dev_info(&pf->pdev->dev, "VF Base mode is enabled\n");
+ } else
+ dev_info(&pf->pdev->dev,
+ "cannot configure VF Base mode when VFs are allocated\n");
+ } else if (strncmp(cmd_buf, "vf base mode off", 16) == 0) {
+ if (!pf->num_alloc_vfs) {
+ pf->vf_base_mode_only = false;
+ dev_info(&pf->pdev->dev, "VF Base mode is disabled\n");
+ } else
+ dev_info(&pf->pdev->dev,
+ "cannot configure VF Base mode when VFs are allocated\n");
} else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) ||
(strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) {
u16 ethtype;
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
/* Get the bw's */
- cnt = sscanf(&cmd_buf[7], "%d %d", &max_bw, &min_bw);
+ cnt = sscanf(&cmd_buf[7], "%u %u", &max_bw, &min_bw);
if (cnt != 2) {
dev_info(&pf->pdev->dev,"set bw <MAX> <MIN>\n");
goto command_write_done;
dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
- dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
- dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
dev_info(&pf->pdev->dev, " dump switch\n");
dev_info(&pf->pdev->dev, " defport off\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
- dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
+ dev_info(&pf->pdev->dev, " vf base mode on\n");
+ dev_info(&pf->pdev->dev, " vf base mode off\n");
dev_info(&pf->pdev->dev, " add ethtype filter <ethtype> <to_queue>");
dev_info(&pf->pdev->dev, " rem ethtype filter <ethtype> <to_queue>");
dev_info(&pf->pdev->dev, " lldp start\n");
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#define _I40E_DEVIDS_H_
-/* Vendor ID */
-#define I40E_INTEL_VENDOR_ID 0x8086
-
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include "i40e_diag.h"
#include "i40e_prototype.h"
-/**
- * i40e_diag_set_loopback
- * @hw: pointer to the hw struct
- * @mode: loopback mode
- *
- * Set chosen loopback mode
- **/
-i40e_status i40e_diag_set_loopback(struct i40e_hw *hw,
- enum i40e_lb_mode mode)
-{
- i40e_status ret_code = I40E_SUCCESS;
-
- if (i40e_aq_set_lb_modes(hw, mode, NULL))
- ret_code = I40E_ERR_DIAG_TEST_FAILED;
-
- return ret_code;
-}
-
/**
* i40e_diag_reg_pattern_test
* @hw: pointer to the hw struct
else
return I40E_ERR_DIAG_TEST_FAILED;
}
-
-/**
- * i40e_diag_fw_alive_test
- * @hw: pointer to the hw struct
- *
- * Perform FW alive diagnostic test
- **/
-i40e_status i40e_diag_fw_alive_test(struct i40e_hw *hw)
-{
- return I40E_SUCCESS;
-}
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
extern struct i40e_diag_reg_test_info i40e_reg_list[];
-i40e_status i40e_diag_set_loopback(struct i40e_hw *hw,
- enum i40e_lb_mode mode);
-i40e_status i40e_diag_fw_alive_test(struct i40e_hw *hw);
i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
I40E_ETH_TEST_REG = 0,
I40E_ETH_TEST_EEPROM,
I40E_ETH_TEST_INTR,
- I40E_ETH_TEST_LOOPBACK,
I40E_ETH_TEST_LINK,
};
"Register test (offline)",
"Eeprom test (offline)",
"Interrupt test (offline)",
- "Loopback test (offline)",
"Link test (on/offline)"
};
struct ethtool_rxnfc *cmd);
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
-static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"MFP",
"LinkPolling",
"flow-director-atr",
"veb-stats",
"hw-atr-eviction",
- "vf-true-promisc-support",
};
-#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "MFP",
- "LinkPolling",
- "flow-director-atr",
- "veb-stats",
- "hw-atr-eviction",
+/* Private flags with a global effect, restricted to PF 0 */
+static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "vf-true-promisc-support",
};
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
-
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
* @pf: the PF struct
"The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
}
+/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @pf: PF struct with the phy_types to convert
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+ u32 *advertising)
+{
+ struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
+ u64 phy_types = pf->hw.phy.phy_types;
+
+ *supported = 0x0;
+ *advertising = 0x0;
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ *supported |= SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ *supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ *supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
+ *advertising |= ADVERTISED_40000baseCR4_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ *supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ *supported |= SUPPORTED_40000baseLR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ *supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ *supported |= SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
+ *advertising |= ADVERTISED_20000baseKR2_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *advertising |= ADVERTISED_10000baseKR_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ *supported |= SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseKX4_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *advertising |= ADVERTISED_1000baseKX_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ }
+}
+
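In short, the new helper advertises a link mode only when the NVM says the phy
type is capable of it and the user requested that speed. A toy, self-contained
model of that contract; every constant below is made up for illustration, none
of them are the driver's.

#include <stdint.h>

/* stand-ins for I40E_CAP_PHY_TYPE_*, SUPPORTED_*, and requested-speed bits */
enum { CAP_10G = 1 << 0, SUP_AN = 1 << 0, SUP_10G = 1 << 1, REQ_10G = 1 << 0 };

static void to_ethtool(uint32_t caps, uint32_t req,
		       uint32_t *sup, uint32_t *adv)
{
	*sup = 0;
	*adv = 0;
	if (caps & CAP_10G) {
		*sup |= SUP_AN | SUP_10G;	/* capable -> supported */
		*adv |= SUP_AN;
		if (req & REQ_10G)		/* requested -> advertised */
			*adv |= SUP_10G;
	}
}

int main(void)
{
	uint32_t sup, adv;

	to_ethtool(CAP_10G, 0, &sup, &adv);	/* capable, not requested */
	return !(sup & SUP_10G) || (adv & SUP_10G);	/* exits 0 */
}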
/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
+ u32 e_advertising = 0x0;
+ u32 e_supported = 0x0;
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
- /* adding 100baseT support for 10GBASET_PHY */
- if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full;
- }
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ecmd->supported = SUPPORTED_Autoneg |
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_1000baseT_Full;
break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- /* firmware detects 10G phy as 100M phy at 100M speed */
- if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
- ecmd->supported |= SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full;
- }
- break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ecmd->supported = SUPPORTED_Autoneg |
ecmd->advertising |= ADVERTISED_100baseT_Full;
}
break;
- /* Backplane is set based on supported phy types in get_settings
- * so don't set anything here but don't warn either
- */
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
+ break;
+ case I40E_PHY_TYPE_25GBASE_KR:
+ case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_SR:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ ecmd->supported = SUPPORTED_Autoneg;
+ ecmd->advertising = ADVERTISED_Autoneg;
+		/* TODO: add speeds when ethtool is ready to support them */
break;
default:
/* if we got here and link is up something bad is afoot */
hw_link_info->phy_type);
}
+	/* Now that we've worked out everything that could be supported by the
+	 * current phy type, get what is supported by the NVM and AND the two
+	 * together to get what is truly supported.
+	 */
+ i40e_phy_type_to_ethtool(pf, &e_supported,
+ &e_advertising);
+
+ ecmd->supported = ecmd->supported & e_supported;
+ ecmd->advertising = ecmd->advertising & e_advertising;
+
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd, struct i40e_pf *pf)
+ struct ethtool_cmd *ecmd,
+ struct i40e_pf *pf)
{
- enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- ecmd->supported = 0x0;
- ecmd->advertising = 0x0;
- if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XFI ||
- phy_types & I40E_CAP_PHY_TYPE_SFI ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- ecmd->supported |= SUPPORTED_40000baseCR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
- }
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- ecmd->supported |= SUPPORTED_40000baseSR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+ i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+ &ecmd->advertising);
/* With no link speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
-
- /* For backplane, supported and advertised are only reliant on the
- * phy types the NVM specifies are supported.
- */
- if (hw->device_id == I40E_DEV_ID_KX_B ||
- hw->device_id == I40E_DEV_ID_KX_C ||
- hw->device_id == I40E_DEV_ID_20G_KR2 ||
- hw->device_id == I40E_DEV_ID_20G_KR2_A) {
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- ecmd->supported |= SUPPORTED_40000baseKR4_Full;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- ecmd->supported |= SUPPORTED_20000baseKR2_Full;
- ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- ecmd->advertising |= ADVERTISED_10000baseKR_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
- }
- }
-
/* Set autoneg settings */
ecmd->autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
* that can disable it, so otherwise return error
*/
if (safe_ecmd.supported & SUPPORTED_Autoneg &&
- hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
+ hw->phy.link_info.phy_type !=
+ I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
return -EINVAL;
}
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
/* save the requested speeds */
hw->phy.link_info.requested_speeds = config.link_speed;
static int i40e_set_tso(struct net_device *netdev, u32 data)
{
if (data) {
+#ifndef HAVE_NDO_FEATURES_CHECK
+ if (netdev->mtu >= 576) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netdev_info(netdev, "MTU setting is too low to enable TSO\n");
+ }
+#else
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
+#endif
} else {
#ifndef HAVE_NETDEV_VLAN_FEATURES
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
- else
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
+ drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
#endif
}
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- if (pf->hw.pf_id == 0)
- return I40E_PRIV_FLAGS_GL_STR_LEN;
- else
- return I40E_PRIV_FLAGS_STR_LEN;
+ return I40E_PRIV_FLAGS_STR_LEN +
+ (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
default:
return -EOPNOTSUPP;
}
switch (stringset) {
case ETH_SS_TEST:
- for (i = 0; i < I40E_TEST_LEN; i++) {
- memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ memcpy(data, i40e_gstrings_test,
+ I40E_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
break;
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
case ETH_SS_PRIV_FLAGS:
- if (pf->hw.pf_id == 0) {
- for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings_gl[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- } else {
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- }
+ memcpy(data, i40e_priv_flags_strings,
+ I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0)
+ memcpy(data, i40e_gl_priv_flags_strings,
+ I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
#endif
default:
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
return 0;
#else /* HAVE_PTP_1588_CLOCK */
return *data;
}
-static int i40e_loopback_test(struct net_device *netdev, u64 *data)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- netif_info(pf, hw, netdev, "loopback test not implemented\n");
- *data = 0;
-
- return *data;
-}
-
#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
static int i40e_diag_test_count(struct net_device *netdev)
{
static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
{
- struct i40e_vsi **vsi = pf->vsi;
- int i;
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!vsi[i])
- continue;
- if (vsi[i]->type == I40E_VSI_VMDQ2)
- return true;
- }
-
- return false;
+ return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
}
static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LOOPBACK] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
/* run reg test last, a reset is required after it */
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
data[I40E_ETH_TEST_REG] = 0;
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
- data[I40E_ETH_TEST_LOOPBACK] = 0;
}
skip_ol_tests:
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
- i40e_led_set(hw, false, pf->led_status);
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
+ i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
u16 temp_status;
int i;
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
ret = i40e_led_get_phy(hw, &temp_status,
/* 10GBaseT PHY controls led's through PHY, not MAC */
for (i = 0; i < (data * 1000); i += 400) {
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0xF, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
msleep_interruptible(200);
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
msleep_interruptible(200);
}
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, pf->led_status, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status,
* 125us (8000 interrupts per second) == ITR(62)
*/
-static int i40e_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
+ * are per queue. If queue is <0 then we default to queue 0 as the
+ * representative value.
+ **/
+static int __i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+	/* rx and tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, return queue 0's value as the representative.
+	 */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
* fits the original intent of the ethtool variable,
return 0;
}
-static int i40e_set_coalesce(struct net_device *netdev,
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if the user
+ * has modified per-queue settings, this reports only queue 0's values. See
+ * __i40e_get_coalesce for more details.
+ **/
+static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
+ return __i40e_get_coalesce(netdev, ec, -1);
+}
+
+#ifdef ETHTOOL_PERQUEUE
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
+static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_get_coalesce(netdev, ec, queue);
+}
+
+#endif /* ETHTOOL_PERQUEUE */
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
+ u16 vector, intrl;
+
+ intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
+
+ vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
+ vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = vsi->rx_rings[queue]->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = vsi->tx_rings[queue]->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
+ i40e_flush(hw);
+}
+
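Per-queue ITR settings reuse the encoding of the old per-VSI fields: the
adaptive ("dynamic") flag is a high bit carried inside *_itr_setting and
stripped before the value reaches the register. A standalone sketch; the
macro values here (0x8000 flag, 2 µs register units) are my reading of the
driver's headers, not quoted from them.

#include <stdint.h>
#include <assert.h>

#define ITR_DYNAMIC	0x8000u			/* assumed I40E_ITR_DYNAMIC */
#define ITR_TO_REG(s)	(((s) & ~ITR_DYNAMIC) >> 1)

int main(void)
{
	uint16_t setting = 50 | ITR_DYNAMIC;	/* 50 us, adaptive on */

	assert(ITR_TO_REG(setting) == 25);	/* flag stripped, 2 us units */
	setting &= ~ITR_DYNAMIC;		/* ethtool adaptive-rx off */
	assert(ITR_TO_REG(setting) == 25);	/* interval itself unchanged */
	return 0;
}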
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- u16 vector;
+ u16 intrl_reg;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- /* tx_coalesce_usecs_high is ignored, use rx-usecs-high to adjust limit */
+ /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
- netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+ if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
+ INTRL_REG_TO_USEC(I40E_MAX_INTRL));
return -EINVAL;
}
- vector = vsi->base_vector;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- } else if (ec->rx_coalesce_usecs == 0) {
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else {
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
- vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+ intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
+ vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
+ if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
+ netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
+ vsi->int_rate_limit);
+ }
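The new rounding message exists because the rate-limit register is coarser
than one microsecond, so the requested value can come back smaller after the
usec-to-register-to-usec round trip. A sketch under the assumption of 4 µs
register granularity (my reading of i40e_intrl_usec_to_reg, used only to
illustrate the round-trip):

#include <stdio.h>

#define INTRL_USEC_TO_REG(us)	((us) / 4)	/* assumed 4 us units */
#define INTRL_REG_TO_USEC(reg)	((reg) * 4)

int main(void)
{
	unsigned int requested = 42;
	unsigned int reg = INTRL_USEC_TO_REG(requested);
	unsigned int actual = INTRL_REG_TO_USEC(reg);

	if (actual != requested)
		printf("Interrupt rate limit rounded down to %u\n", actual);
	return 0;
}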
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- } else if (ec->tx_coalesce_usecs == 0) {
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else {
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
-
- q_vector = vsi->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
- wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
- i40e_flush(hw);
+	/* rx and tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, apply the settings to all queues.
+	 */
+ if (queue < 0) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_set_itr_per_queue(vsi, ec, i);
+ } else if (queue < vsi->num_queue_pairs) {
+ i40e_set_itr_per_queue(vsi, ec, queue);
+ } else {
+ netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ vsi->num_queue_pairs - 1);
+ return -EINVAL;
}
return 0;
}
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_set_coalesce(netdev, ec, -1);
+}
+
#ifdef ETHTOOL_SRXNTUPLE
/* We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
 * a null pointer dereference as it was assumed if the NETIF_F_NTUPLE flag
}
#endif /* ETHTOOL_SRXNTUPLE */
+
+#ifdef ETHTOOL_PERQUEUE
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
+static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_set_coalesce(netdev, ec, queue);
+}
+#endif /* ETHTOOL_PERQUEUE */
+
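These hooks plug into ethtool's per-queue ioctl (ETHTOOL_PERQUEUE, kernel
4.6+). Below is a hedged user-space sketch of reading queue 0's coalesce
values; the interface name is a placeholder and it assumes the installed
headers define struct ethtool_per_queue_op.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_per_queue_op *op;
	struct ethtool_coalesce *coal;
	struct ifreq ifr;
	int fd, ret;

	/* room for the op header plus one per-queue coalesce record */
	op = calloc(1, sizeof(*op) + sizeof(*coal));
	if (!op)
		return 1;
	op->cmd = ETHTOOL_PERQUEUE;
	op->sub_command = ETHTOOL_GCOALESCE;
	op->queue_mask[0] = 1u << 0;		/* select queue 0 only */
	coal = (struct ethtool_coalesce *)op->data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical netdev */
	ifr.ifr_data = (void *)op;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (!ret)
		printf("queue 0: rx-usecs=%u tx-usecs=%u\n",
		       coal->rx_coalesce_usecs, coal->tx_coalesce_usecs);
	close(fd);
	free(op);
	return ret != 0;
}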
#ifdef ETHTOOL_GRXRINGS
/**
* i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
/* Process bits of hash input set */
if (i_set) {
- if (i_set & BIT_ULL(I40E_L4_SRC_SHIFT))
+ if (i_set & I40E_L4_SRC_MASK)
cmd->data |= RXH_L4_B_0_1;
- if (i_set & BIT_ULL(I40E_L4_DST_SHIFT))
+ if (i_set & I40E_L4_DST_MASK)
cmd->data |= RXH_L4_B_2_3;
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & BIT_ULL(I40E_L3_SRC_SHIFT))
+ if (i_set & I40E_L3_SRC_MASK)
cmd->data |= RXH_IP_SRC;
- if (i_set & BIT_ULL(I40E_L3_DST_SHIFT))
+ if (i_set & I40E_L3_DST_MASK)
cmd->data |= RXH_IP_DST;
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
- if (i_set & BIT_ULL(I40E_L3_V6_SRC_SHIFT))
+ if (i_set & I40E_L3_V6_SRC_MASK)
cmd->data |= RXH_IP_SRC;
- if (i_set & BIT_ULL(I40E_L3_V6_DST_SHIFT))
+ if (i_set & I40E_L3_V6_DST_MASK)
cmd->data |= RXH_IP_DST;
}
}
/* Present the value of user-def as part of get filters */
if (i40e_is_flex_filter(rule)) {
- fsp->h_ext.data[1] = (__be32)((rule->flex_bytes[3] << 16) |
- rule->flex_bytes[2]);
- fsp->m_ext.data[1] = (__be32)((rule->flex_mask[3] << 16) |
- rule->flex_mask[2]);
+ u32 flex_temp;
+
+ flex_temp = (be16_to_cpu(rule->flex_bytes[3]) << 16) |
+ be16_to_cpu(rule->flex_bytes[2]);
+ fsp->h_ext.data[1] = cpu_to_be32(flex_temp);
+ flex_temp = (be16_to_cpu(rule->flex_mask[3]) << 16) |
+ be16_to_cpu(rule->flex_mask[2]);
+ fsp->m_ext.data[1] = cpu_to_be32(flex_temp);
fsp->flow_type |= FLOW_EXT;
}
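The flex-word hunk above fixes an endianness bug: the old code shifted __be16
values as if they were host-order. The fix converts each half to CPU order,
combines them, and converts the result back once. A standalone sketch of the
difference, using the POSIX byte-order helpers as stand-ins for the kernel's
cpu_to_be*/be*_to_cpu:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons/ntohs/htonl as cpu_to_be*/be*_to_cpu */

int main(void)
{
	uint16_t be_hi = htons(0x1234);	/* like rule->flex_bytes[3] */
	uint16_t be_lo = htons(0xabcd);	/* like rule->flex_bytes[2] */

	/* broken: shifts a big-endian value as if it were host order */
	uint32_t wrong = ((uint32_t)be_hi << 16) | be_lo;

	/* fixed: to CPU order, combine, back to big endian */
	uint32_t right = htonl(((uint32_t)ntohs(be_hi) << 16) | ntohs(be_lo));

	printf("wrong=%#x right=%#x (right is 0x1234abcd on the wire)\n",
	       (unsigned)wrong, (unsigned)right);
	return 0;
}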
u64 src_l3 = 0, dst_l3 = 0;
if (nfc->data & RXH_L4_B_0_1)
- i_set |= BIT_ULL(I40E_L4_SRC_SHIFT);
+ i_set |= I40E_L4_SRC_MASK;
else
- i_set &= ~BIT_ULL(I40E_L4_SRC_SHIFT);
+ i_set &= ~I40E_L4_SRC_MASK;
if (nfc->data & RXH_L4_B_2_3)
- i_set |= BIT_ULL(I40E_L4_DST_SHIFT);
+ i_set |= I40E_L4_DST_MASK;
else
- i_set &= ~BIT_ULL(I40E_L4_DST_SHIFT);
+ i_set &= ~I40E_L4_DST_MASK;
if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
- src_l3 = I40E_L3_V6_SRC_SHIFT;
- dst_l3 = I40E_L3_V6_DST_SHIFT;
+ src_l3 = I40E_L3_V6_SRC_MASK;
+ dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_SHIFT;
- dst_l3 = I40E_L3_DST_SHIFT;
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
} else {
/* Any other flow type are not supported here */
return i_set;
}
if (nfc->data & RXH_IP_SRC)
- i_set |= BIT_ULL(src_l3);
+ i_set |= src_l3;
else
- i_set &= ~BIT_ULL(src_l3);
+ i_set &= ~src_l3;
if (nfc->data & RXH_IP_DST)
- i_set |= BIT_ULL(dst_l3);
+ i_set |= dst_l3;
else
- i_set &= ~BIT_ULL(dst_l3);
+ i_set &= ~dst_l3;
return i_set;
}
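The switch from BIT_ULL(*_SHIFT) to the *_MASK macros matters because several
RSS input-set fields are wider than one bit, so setting or clearing a single
bit leaves part of the field behind. The field width below is an assumption
modeled on the driver's definitions (e.g. I40E_L3_SRC_MASK as a two-bit field
at its shift), for illustration only:

#include <stdint.h>
#include <stdio.h>

#define L3_SRC_SHIFT	47
#define L3_SRC_MASK	(0x3ULL << L3_SRC_SHIFT)	/* two-bit field */

int main(void)
{
	uint64_t i_set = 0;

	i_set |= L3_SRC_MASK;			/* enable the whole field */
	printf("field set: %d\n", (i_set & L3_SRC_MASK) != 0);

	i_set &= ~(1ULL << L3_SRC_SHIFT);	/* single-bit clear... */
	printf("still partly set: %d\n", (i_set & L3_SRC_MASK) != 0);
	return 0;
}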
u8 pit_idx = I40E_FLEX_PIT_IDX_START_L4;
u64 dest_word_l4, dest_word6, dest_word7;
- switch (flow_type & FLOW_TYPE_MASK) {
+ switch (flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
idx = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
input_set = &pf->fd_tcp4_input_set;
i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(idx), 0);
/* Time to reset value of input set based on flow-type */
- if (input_set && (!(*input_set))) {
+ if (!*input_set) {
i40e_write_fd_input_set(pf, idx, *input_set);
*input_set = 0;
}
(rule->src_port != input->src_port) ||
(rule->flow_type != input->flow_type) ||
(rule->ip4_proto != input->ip4_proto) ||
- (rule->sctp_v_tag != input->sctp_v_tag) ||
- (rule->q_index != input->q_index))
+ (rule->sctp_v_tag != input->sctp_v_tag))
return false;
/* handle flex_filter, decide based upon pattern equality */
netif_err(pf, drv, vsi->netdev,
"Previous flex filter(ID: %u) exists for flow-type %u whose flex mask (aka 'offset'): %u is different from current mask :%u specified. Please delete previous flex filter and try again.\n",
input->fd_id,
- input->flow_type & FLOW_TYPE_MASK,
+ input->flow_type & ~FLOW_EXT,
existing_mask, specified_mask);
return -EINVAL;
}
return -EINVAL;
pf = vsi->back;
- switch (fsp->flow_type & FLOW_TYPE_MASK) {
+ switch (fsp->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
idx = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
header_len = I40E_TCPIP_DUMMY_PACKET_LEN;
* It is used upon ntuple off/on and when all filters are deleted
* for a given flow.
*/
- if (input_set && (!(*input_set)))
+ if (!*input_set)
*input_set = val;
if (!i40e_is_flex_filter(input))
netif_err(pf, drv, vsi->netdev, "Change of input set is not supported when there are existing filters(%u) for specified flow-type: %u. Please delete them and re-try\n",
(flex_flow_based_filter_cnt) ?
flex_flow_based_filter_cnt : flow_based_filter_cnt,
- fsp->flow_type & FLOW_TYPE_MASK);
+ fsp->flow_type & ~FLOW_EXT);
return -EOPNOTSUPP;
}
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
- if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
return -ENOSPC;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
- input->flow_type = fsp->flow_type;
+ input->flow_type = fsp->flow_type & ~FLOW_EXT;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
/* Reverse the src and dest notion, since the HW expects them to be from
input->flex_mask[3] = 0;
if ((fsp->h_ext.data[0] == cpu_to_be32(0x0)) &&
(fsp->h_ext.data[1] != cpu_to_be32(~0))) {
- input->flex_bytes[2] = fsp->h_ext.data[1];
+ u16 flex_temp;
+
+ flex_temp = be32_to_cpu(fsp->h_ext.data[1]);
+ input->flex_bytes[2] = cpu_to_be16(flex_temp);
if (input->flex_bytes[2]) {
netif_err(pf, drv, vsi->netdev,
"Only one word is supported for flex filter\n");
goto free_input;
}
/* Store only relevant section of user-defs */
- input->flex_bytes[3] = fsp->h_ext.data[1] >> 16;
- input->flex_mask[3] = fsp->m_ext.data[1] >> 16;
+ flex_temp = be32_to_cpu(fsp->h_ext.data[1]) >> 16;
+ input->flex_bytes[3] = cpu_to_be16(flex_temp);
+ flex_temp = be32_to_cpu(fsp->m_ext.data[1]) >> 16;
+ input->flex_mask[3] = cpu_to_be16(flex_temp);
}
}
*flags = 0;
- switch (fsp->flow_type & FLOW_TYPE_MASK) {
+ switch (fsp->flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
/* use is_broadcast and is_zero to check for all 0xf or 0 */
if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
* see if 0xffffffff or a non-zero three-byte tenant id was set.
*/
tenant_id = be32_to_cpu(fsp->h_ext.data[0]);
- if (tenant_id && tenant_id <= 0xffffff) {
- i |= I40E_CLOUD_FIELD_TEN_ID;
- } else if (tenant_id == 0xffffffff || tenant_id == 0) {
+ if (tenant_id == 0xffffffff || tenant_id == 0) {
i &= ~I40E_CLOUD_FIELD_TEN_ID;
+ } else if (tenant_id & 0x00ffffff) {
+ i |= I40E_CLOUD_FIELD_TEN_ID;
} else {
dev_info(&pf->pdev->dev, "Bad tenant/vxlan id %d\n", tenant_id);
return I40E_ERR_CONFIG;
u8 flags = 0;
int ret;
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return -EOPNOTSUPP;
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
return -EBUSY;
filter->id = fsp->location;
filter->seid = dst_vsi->seid;
- switch (fsp->flow_type & FLOW_TYPE_MASK) {
+ switch (fsp->flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
ether_addr_copy(filter->outer_mac,
fsp->h_u.ether_spec.h_dest);
case IP_USER_FLOW:
if (flags & I40E_CLOUD_FIELD_TEN_ID) {
dev_info(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+ kfree(filter);
return I40E_ERR_CONFIG;
}
filter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
default:
dev_info(&pf->pdev->dev, "unknown flow type 0x%x\n",
- (fsp->flow_type & FLOW_TYPE_MASK));
+ (fsp->flow_type & ~FLOW_EXT));
kfree(filter);
return I40E_ERR_CONFIG;
}
if (be32_to_cpu(fsp->h_ext.data[0]) != 0xffffffff) {
- filter->tenant_id = be32_to_cpu(fsp->h_ext.data[0]);
- filter->tunnel_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ u32 id = be32_to_cpu(fsp->h_ext.data[0]);
+
+ filter->tenant_id = id & 0x00ffffff;
+ filter->tunnel_type = (id >> 24) & 0xff;
} else {
/* L3 VEB filter for non-tunneled packets or a tuple w/o vni */
filter->tenant_id = 0;
if (ret) {
kfree(filter);
dev_info(&pf->pdev->dev,
- "fail to add cloud filter, err = %d\n", ret);
+ "fail to add cloud filter, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return i40e_aq_rc_to_posix(ret, pf->hw.aq.asq_last_status);
}
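The hunk above changes how ethtool's 32-bit user-def word is decoded for cloud
filters: the top byte now carries the tunnel type and the low 24 bits the
tenant (VNI) id, instead of forcing VXLAN. A standalone sketch of the
unpacking, mirroring the masks in the diff (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t user_def = 0x01000abc;		/* sample: type 1, VNI 0xabc */
	uint32_t tenant_id = user_def & 0x00ffffff;
	uint8_t tunnel_type = (user_def >> 24) & 0xff;

	printf("tunnel_type=%u tenant_id=%#x\n",
	       tunnel_type, (unsigned)tenant_id);
	return 0;
}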
static int i40e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
+ const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int count = ch->combined_count;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
int new_count;
+ int err = 0;
/* We do not support setting channels for any other VSI at present */
if (vsi->type != I40E_VSI_MAIN)
if (count > i40e_max_channels(vsi))
return -EINVAL;
+ /* verify that the number of channels does not invalidate any current
+ * flow director rules
+ */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (rule->dest_ctl != drop && count <= rule->q_index) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d assigns flow to queue %d\n",
+ rule->fd_id, rule->q_index);
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Existing filter rules must be deleted to reduce combined channel count to %d\n",
+ count);
+ return err;
+ }
+
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
#endif /* ETHTOOL_SCHANNELS */
-#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
/**
* i40e_get_rxfh_key_size - get the RSS hash key size
* @netdev: network interface device structure
{
return I40E_HKEY_ARRAY_SIZE;
}
-#endif /* ETHTOOL_GRSSH */
-#ifdef ETHTOOL_GRXFHINDIR
-#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
/**
* i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
return I40E_HLUT_ARRAY_SIZE;
}
-#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
-#ifdef HAVE_RXFH_HASHFUNC
-static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-#else
/**
* i40e_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
+#ifdef HAVE_RXFH_HASHFUNC
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+#else
static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
#endif
-#else
-/**
- * i40e_get_rxfh - get the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- *
- * Reads the indirection table directly from the hardware. Always returns 0.
- **/
-static int i40e_get_rxfh_indir(struct net_device *netdev, u32 *indir)
-#endif /* ETHTOOL_GRSSH */
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
int ret;
u16 i;
-#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
#ifdef HAVE_RXFH_HASHFUNC
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
#endif
-#endif /* ETHTOOl_GRSSH */
-
if (!indir)
return 0;
-#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
seed = key;
-#endif
+
lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!lut)
return -ENOMEM;
+
ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
if (ret)
goto out;
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)(lut[i]);
-
-out:
- kfree(lut);
-
- return ret;
-}
-#else
-/**
- * i40e_get_rxfh_indir - get the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- *
- * Reads the indirection table directly from the hardware. Returns 0 or -EINVAL
- * if the supplied table isn't large enough.
- **/
-static int i40e_get_rxfh_indir(struct net_device *netdev,
- struct ethtool_rxfh_indir *indir)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- u8 *lut;
- int ret;
- u16 i;
- if (indir->size < I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
- ret = i40e_get_rss(vsi, NULL, lut, I40E_HLUT_ARRAY_SIZE);
- if (ret)
- goto out;
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- indir->ring_index[i] = (u32)(lut[i]);
- indir->size = I40E_HLUT_ARRAY_SIZE;
+ indir[i] = (u32)(lut[i]);
out:
kfree(lut);
return ret;
}
-#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
-#endif /* ETHTOOL_GRXFHINDIR */
-#ifdef ETHTOOL_SRXFHINDIR
-#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
-#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
/**
* i40e_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
#else
+#ifdef HAVE_RXFH_NONCONST
+static int i40e_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#else
static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key)
-#endif
-#else
-/**
- * i40e_set_rxfh_indir - set the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- *
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
- * returns 0 after programming the table.
- **/
-static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
-#endif /* EHTTOOL_SRSSH */
+#endif /* HAVE_RXFH_NONCONST */
+#endif /* HAVE_RXFH_HASHFUNC */
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
u8 *seed = NULL;
u16 i;
-
-#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
#ifdef HAVE_RXFH_HASHFUNC
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
#endif
-#endif /* ETHTOOl_SRSSH */
-
- if (!indir)
- return 0;
/* Verify user input. */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
- if (indir[i] >= vsi->rss_size)
- return -EINVAL;
+ if (indir) {
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
+ if (indir[i] >= vsi->rss_size)
+ return -EINVAL;
+ }
}
-#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
-#endif
+
if (!vsi->rss_lut_user) {
vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!vsi->rss_lut_user)
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
I40E_HLUT_ARRAY_SIZE);
}
-#else
-/**
- * i40e_set_rxfh_indir - set the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- *
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
- * returns 0 after programming the table.
- **/
-static int i40e_set_rxfh_indir(struct net_device *netdev,
- const struct ethtool_rxfh_indir *indir)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- u16 i;
-
- if (indir->size < I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- /* Verify user input. */
- for (i = 0; i < (I40E_PFQF_HLUT_MAX_INDEX + 1) * 4; i++) {
- if (indir->ring_index[i] >= vsi->rss_size)
- return -EINVAL;
- }
-
- if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!vsi->rss_lut_user)
- return -ENOMEM;
- }
-
- /* Each 32 bits pointed by 'ring_index' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir->ring_index[i]);
-
- return i40e_config_rss(vsi, NULL, vsi->rss_lut_user,
- I40E_HLUT_ARRAY_SIZE);
-}
-#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
-#endif /* ETHTOOL_SRXFHINDIR */
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
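With the legacy rxfh_indir paths removed, i40e_set_rxfh now accepts a NULL
indirection table and falls back to i40e_fill_rss_lut(). A sketch of the
assumed default spread, round-robin over the active queues; the modulo
behavior is my reading of that helper, not quoted from it:

#include <stdio.h>

static void fill_rss_lut(unsigned char *lut, int lut_size, int rss_size)
{
	int i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* queue index, round robin */
}

int main(void)
{
	unsigned char lut[16];		/* the real table is much larger */
	int i;

	fill_rss_lut(lut, 16, 6);	/* 6 active queues */
	for (i = 0; i < 16; i++)
		printf("%d ", lut[i]);
	printf("\n");			/* 0 1 2 3 4 5 0 1 2 ... */
	return 0;
}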
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
/**
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
+ ret_flags |= pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
if (pf->hw.pf_id == 0) {
ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
+
+ /* flush current ATR settings */
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
}
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
- (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
- pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
+ pf->hw_disabled_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
else
- pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
/* if needed, issue reset to cause things to take effect */
if (reset_required)
#endif
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
-#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
- .get_rxfh_key_size = i40e_get_rxfh_key_size,
-#endif
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-#ifdef ETHTOOL_GRXFHINDIR
-#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
-#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
-#ifdef ETHTOOL_GRSSH
.get_rxfh = i40e_get_rxfh,
-#else
- .get_rxfh_indir = i40e_get_rxfh_indir,
-#endif /* ETHTOOL_GRSSH */
-#endif /* ETHTOOL_GRXFHINDIR */
-#ifdef ETHTOOL_SRXFHINDIR
-#ifdef ETHTOOL_SRSSH
.set_rxfh = i40e_set_rxfh,
-#else
- .set_rxfh_indir = i40e_set_rxfh_indir,
-#endif /* ETHTOOL_SRSSH */
-#endif /* ETHTOOL_SRXFHINDIR */
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
#ifdef ETHTOOL_SCHANNELS
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
#ifdef HAVE_ETHTOOL_GET_TS_INFO
.get_ts_info = i40e_get_ts_info,
#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#ifdef ETHTOOL_PERQUEUE
+ .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+#endif /* ETHTOOL_PERQUEUE */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
};
.set_phys_id = i40e_set_phys_id,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
- .get_rxfh_indir = i40e_get_rxfh_indir,
- .set_rxfh_indir = i40e_set_rxfh_indir,
+ .get_rxfh = i40e_get_rxfh,
+ .set_rxfh = i40e_set_rxfh,
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
};
void i40e_set_ethtool_ops(struct net_device *netdev)
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
- xid, ddp->sgl, ddp->sgc);
+ xid, ddp->sgl, ddp->sgc);
return 0;
}
i40e_fcoe_ddp_clear(ddp);
* must end at bufflen
*/
if (((i != (dmacount - 1)) || (thislen != len)) &&
- ((thislen + thisoff) != bufflen))
+ ((thislen + thisoff) != bufflen))
goto out_noddp_free;
ddp->udl[j] = (u64)(addr - thisoff);
/**
* i40e_fcoe_tso - set up FCoE TSO
* @tx_ring: ring to send buffer on
- * @skb: send buffer
+ * @first: pointer to first Tx buffer for xmit
* @tx_flags: collected send information
* @hdr_len: the tso header length
* @sof: the SOF to indicate class of service
* code to drop the frame.
**/
static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
- struct sk_buff *skb,
+ struct i40e_tx_buffer *first,
u32 tx_flags, u8 *hdr_len, u8 sof)
{
+ struct sk_buff *skb = first->skb;
struct i40e_tx_context_desc *context_desc;
u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
struct fc_frame_header *fh;
u64 cd_type_cmd_tso_mss;
+ u16 gso_segs, gso_size;
/* must match gso type as FCoE */
if (!skb_is_gso(skb))
*hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
sizeof(struct fcoe_crc_eof);
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+#ifndef HAVE_NDO_FEATURES_CHECK
+ /* too small a TSO segment size causes hw problems */
+ if (gso_size < 64) {
+ gso_size = 64;
+ gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64);
+ }
+#endif
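+ /* Worked example of the clamp above (hypothetical numbers): for a
+ * 9014-byte FCoE TSO frame with a 66-byte header and an advertised
+ * gso_size below 64, the MSS is raised to 64 and the segment count
+ * recomputed as DIV_ROUND_UP(9014 - 66, 64) = 140.
+ */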
+ /* update gso size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* check sof to decide a class 2 or 3 TSO */
if (likely(i40e_fcoe_sof_is_class3(sof)))
cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
/* fill the field values */
cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
cd_type_cmd_tso_mss =
((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
skb_reset_mac_header(skb);
skb->mac_len = sizeof(struct ethhdr);
if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *) eth_hdr(skb);
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);
protocol = veth->h_vlan_encapsulated_proto;
skb->mac_len += sizeof(struct vlan_hdr);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
/* FIP is a regular L2 traffic w/o offload */
if (skb->protocol == htons(ETH_P_FIP))
tx_flags |= I40E_TX_FLAGS_FCCRC;
/* check we should do sequence offload */
- fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
+ fso = i40e_fcoe_tso(tx_ring, first, tx_flags, &hdr_len, sof);
if (fso < 0)
goto out_drop;
else if (fso)
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
*/
netdev->dev_port = 1;
#endif
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
- i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, hw->mac.san_addr);
+ i40e_add_mac_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC);
+ i40e_add_mac_filter(vsi, FIP_ALL_FCOE_MACS);
+ i40e_add_mac_filter(vsi, FIP_ALL_ENODE_MACS);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* Calculates the maximum amount of memory for the function required, based
* on the number of resources it must provide context for.
**/
-u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
u64 fpm_size = 0;
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
**/
-i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
struct i40e_hmc_lan_create_obj_info *info)
{
i40e_status ret_code = I40E_SUCCESS;
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
**/
-i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
struct i40e_hmc_lan_delete_obj_info *info)
{
i40e_status ret_code = I40E_SUCCESS;
i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}
-/**
- * i40e_read_byte - read HMC context byte into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_byte(u8 *hmc_bits,
- struct i40e_context_ele *ce_info,
- u8 *dest)
-{
- u8 dest_byte, mask;
- u8 *src, *target;
- u16 shift_width;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
-
- /* shift to correct alignment */
- mask <<= shift_width;
-
- /* get the current bits from the src bit string */
- src = hmc_bits + (ce_info->lsb / 8);
-
- i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
-
- dest_byte &= ~(mask);
-
- dest_byte >>= shift_width;
-
- /* get the address from the struct field */
- target = dest + ce_info->offset;
-
- /* put it back in the struct */
- i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_word - read HMC context word into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_word(u8 *hmc_bits,
- struct i40e_context_ele *ce_info,
- u8 *dest)
-{
- u16 dest_word, mask;
- u8 *src, *target;
- u16 shift_width;
- __le16 src_word;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
-
- /* shift to correct alignment */
- mask <<= shift_width;
-
- /* get the current bits from the src bit string */
- src = hmc_bits + (ce_info->lsb / 8);
-
- i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
-
- /* the data in the memory is stored as little endian so mask it
- * correctly
- */
- src_word &= ~(CPU_TO_LE16(mask));
-
- /* get the data back into host order before shifting */
- dest_word = LE16_TO_CPU(src_word);
-
- dest_word >>= shift_width;
-
- /* get the address from the struct field */
- target = dest + ce_info->offset;
-
- /* put it back in the struct */
- i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_dword - read HMC context dword into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_dword(u8 *hmc_bits,
- struct i40e_context_ele *ce_info,
- u8 *dest)
-{
- u32 dest_dword, mask;
- u8 *src, *target;
- u16 shift_width;
- __le32 src_dword;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = ~(u32)0;
-
- /* shift to correct alignment */
- mask <<= shift_width;
-
- /* get the current bits from the src bit string */
- src = hmc_bits + (ce_info->lsb / 8);
-
- i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
-
- /* the data in the memory is stored as little endian so mask it
- * correctly
- */
- src_dword &= ~(CPU_TO_LE32(mask));
-
- /* get the data back into host order before shifting */
- dest_dword = LE32_TO_CPU(src_dword);
-
- dest_dword >>= shift_width;
-
- /* get the address from the struct field */
- target = dest + ce_info->offset;
-
- /* put it back in the struct */
- i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
- I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_qword - read HMC context qword into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_qword(u8 *hmc_bits,
- struct i40e_context_ele *ce_info,
- u8 *dest)
-{
- u64 dest_qword, mask;
- u8 *src, *target;
- u16 shift_width;
- __le64 src_qword;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = ~(u64)0;
-
- /* shift to correct alignment */
- mask <<= shift_width;
-
- /* get the current bits from the src bit string */
- src = hmc_bits + (ce_info->lsb / 8);
-
- i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
-
- /* the data in the memory is stored as little endian so mask it
- * correctly
- */
- src_qword &= ~(CPU_TO_LE64(mask));
-
- /* get the data back into host order before shifting */
- dest_qword = LE64_TO_CPU(src_qword);
-
- dest_qword >>= shift_width;
-
- /* get the address from the struct field */
- target = dest + ce_info->offset;
-
- /* put it back in the struct */
- i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
- I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_get_hmc_context - extract HMC context bits
- * @context_bytes: pointer to the context bit array
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static i40e_status i40e_get_hmc_context(u8 *context_bytes,
- struct i40e_context_ele *ce_info,
- u8 *dest)
-{
- int f;
-
- for (f = 0; ce_info[f].width != 0; f++) {
- switch (ce_info[f].size_of) {
- case 1:
- i40e_read_byte(context_bytes, &ce_info[f], dest);
- break;
- case 2:
- i40e_read_word(context_bytes, &ce_info[f], dest);
- break;
- case 4:
- i40e_read_dword(context_bytes, &ce_info[f], dest);
- break;
- case 8:
- i40e_read_qword(context_bytes, &ce_info[f], dest);
- break;
- default:
- /* nothing to do, just keep going */
- break;
- }
- }
-
- return I40E_SUCCESS;
-}
-
/**
* i40e_clear_hmc_context - zero out the HMC context bits
* @hw: the hardware struct
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
return ret_code;
}
-/**
- * i40e_get_lan_tx_queue_context - return the HMC context for the queue
- * @hw: the hardware struct
- * @queue: the queue we care about
- * @s: the struct to be filled
- **/
-i40e_status i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s)
-{
- i40e_status err;
- u8 *context_bytes;
-
- err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
- if (err < 0)
- return err;
-
- return i40e_get_hmc_context(context_bytes,
- i40e_hmc_txq_ce_info, (u8 *)s);
-}
-
/**
* i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
* @hw: the hardware struct
i40e_hmc_txq_ce_info, (u8 *)s);
}
-/**
- * i40e_get_lan_rx_queue_context - return the HMC context for the queue
- * @hw: the hardware struct
- * @queue: the queue we care about
- * @s: the struct to be filled
- **/
-i40e_status i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s)
-{
- i40e_status err;
- u8 *context_bytes;
-
- err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
- if (err < 0)
- return err;
-
- return i40e_get_hmc_context(context_bytes,
- i40e_hmc_rxq_ce_info, (u8 *)s);
-}
-
/**
* i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
* @hw: the hardware struct
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
enum i40e_hmc_model model);
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
- u32 fcoe_cntx_num, u32 fcoe_filt_num);
-i40e_status i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_txq *s);
-i40e_status i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_rxq *s);
-i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_create_obj_info *info);
-i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_delete_obj_info *info);
#endif /* _I40E_LAN_HMC_H_ */
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <net/geneve.h>
#endif
#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+#include <net/udp_tunnel.h>
+#endif
char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
#define DRV_VERSION_DESC ""
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 19
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 42
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
DRV_VERSION_DESC __stringify(DRV_VERSION_LOCAL)
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright(c) 2013 - 2016 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright(c) 2013 - 2017 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size);
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
#endif /* X722_DEV_SUPPORT */
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
{0, }
};
module_param_array_named(max_vfs, max_vfs, int, NULL, 0);
MODULE_PARM_DESC(max_vfs,
"Number of Virtual Functions: 0 = disable (default), 1-"
- XSTRINGIFY(I40E_MAX_VF_COUNT) " = enable "
+ __stringify(I40E_MAX_VF_COUNT) " = enable "
"this many VFs");
#endif /* CONFIG_PCI_IOV */
#endif /* HAVE_SRIOV_CONFIGURE */
*
* If not already scheduled, this puts the task into the work queue
**/
-static void i40e_service_event_schedule(struct i40e_pf *pf)
+void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
- !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
queue_work(i40e_wq, &pf->service_task);
}
unsigned long trans_start;
q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start ? : netdev->trans_start;
- if (netif_xmit_stopped(q) && time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
hung_queue = i;
break;
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
if (hung_queue ==
- vsi->tx_rings[i]->queue_index) {
+ vsi->tx_rings[i]->queue_index) {
tx_ring = vsi->tx_rings[i];
break;
}
if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
pf->tx_timeout_recovery_level = 1; /* reset after some time */
else if (time_before(jiffies,
- (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+ (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
if (tx_ring) {
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
val = rd32(&pf->hw,
I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
- tx_ring->vsi->base_vector - 1));
+ tx_ring->vsi->base_vector - 1));
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
- vsi->seid, hung_queue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use,
- readl(tx_ring->tail), val);
+ vsi->seid, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
}
pf->tx_timeout_last_recovery = jiffies;
pf->veb[i]->stat_offsets_loaded = false;
}
}
+ pf->hw_csum_rx_error = 0;
#ifdef I40E_ADD_PROBES
pf->tcp_segs = 0;
pf->tx_tcp_cso = 0;
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
- if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
nsd->fd_sb_status = false;
else if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
- if (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)
+ if (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)
nsd->fd_atr_status = false;
else if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
nsd->fd_atr_status = true;
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
**/
struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (vlan == f->vlan) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ (vlan == f->vlan))
return f;
}
return NULL;
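/* The hash key used above comes from i40e_addr_to_hkey(), which is outside
 * this hunk; presumably it just folds the six MAC address bytes into a u64
 * bucket key, along the lines of this sketch (name is illustrative):
 */
static inline u64 example_addr_to_hkey(const u8 *macaddr)
{
	u64 key = 0;

	/* copy the 6 address bytes into the low bytes of the key */
	ether_addr_copy((u8 *)&key, macaddr);
	return key;
}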
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
* MAC address was not found
**/
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
+ if (ether_addr_equal(macaddr, f->macaddr))
return f;
}
return NULL;
**/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f;
+ /* If we have a PVID, always operate in VLAN mode */
+ if (vsi->info.pvid)
+ return true;
- /* Only -1 for all the filters denotes not in vlan mode
- * so we have to go through all the list in order to make sure
+ /* We need to operate in VLAN mode whenever we have any filters with
+ * a VLAN other than I40E_VLAN_ALL. We could check the table each
+ * time, incurring search cost repeatedly. However, we can notice two
+ * things:
+ *
+ * 1) the only place where we can gain a VLAN filter is in
+ * i40e_add_filter.
+ *
+ * 2) the only place where filters are actually removed is in
+ * i40e_sync_filters_subtask.
+ *
+ * Thus, we can simply use a boolean value, has_vlan_filters which we
+ * will set to true when we add a vlan filter in i40e_add_filter. Then
+ * we have to perform the full search after deleting filters in
+ * i40e_sync_filters_subtask, but we already have to search
+ * filters here and can perform the check at the same time. This
+ * results in avoiding embedding a loop for vlan mode inside another
+ * loop over all the filters, and should maintain correctness as noted
+ * above.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->vlan >= 0 || vsi->info.pvid)
- return true;
- }
-
- return false;
+ return vsi->has_vlan_filter;
}
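/* In sketch form, the invariant described above: i40e_add_filter() sets
 * vsi->has_vlan_filter = true whenever it adds a filter with vlan >= 0, and
 * i40e_correct_mac_vlan_filters() recomputes it as !!vlan_filters after the
 * sync-time search, so no extra loop over the filter table is ever needed
 * here. Both sites appear later in this patch.
 */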
/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be filtered
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
+ * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
*
- * Goes through all the macvlan filters and adds a
- * macvlan filter for each unique vlan that already exists
+ * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
+ * behave as expected. If we have any active VLAN filters remaining or about
+ * to be added then we need to update non-VLAN filters to be marked as VLAN=0
+ * so that they only match against untagged traffic. If we no longer have any
+ * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
+ * so that they match against both tagged and untagged traffic. This ensures
+ * that when we have an active VLAN we receive only untagged traffic plus
+ * traffic matching active VLANs, and that with no active VLANs we operate
+ * in non-VLAN mode and receive all traffic, tagged or untagged.
*
- * Returns first filter found on success, else NULL
- **/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
+ * Finally, in a similar fashion, this function also corrects filters when
+ * there is an active PVID assigned to this VSI.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called with the
+ * mac_filter_hash_lock held.
+ */
+static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters)
{
- struct i40e_mac_filter *f;
+ struct i40e_mac_filter *f, *add_head;
+ struct hlist_node *h;
+ int bkt, new_vlan;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (vsi->info.pvid)
- f->vlan = le16_to_cpu(vsi->info.pvid);
- if (!i40e_find_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev)) {
- if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
- return NULL;
- }
- }
+ /* To determine if a particular filter needs to be replaced we
+ * have the three following conditions:
+ *
+ * a) if we have a PVID assigned, then all filters which are
+ * not marked as VLAN=PVID must be replaced with filters that
+ * are.
+ * b) otherwise, if we have any active VLANS, all filters
+ * which are marked as VLAN=-1 must be replaced with
+ * filters marked as VLAN=0
+ * c) finally, if we do not have any active VLANS, all filters
+ * which are marked as VLAN=0 must be replaced with filters
+ * marked as VLAN=-1
+ */
- return list_first_entry_or_null(&vsi->mac_filter_list,
- struct i40e_mac_filter, list);
-}
+ /* Update the filters about to be added in place */
+ hlist_for_each_entry(f, tmp_add_list, hlist) {
+ if (vsi->info.pvid && f->vlan != vsi->info.pvid)
+ f->vlan = vsi->info.pvid;
+ else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
+ f->vlan = 0;
+ else if (!vlan_filters && f->vlan == 0)
+ f->vlan = I40E_VLAN_ANY;
+ }
+
+ /* Update the remaining active filters */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ /* Combine the checks for whether a filter needs to be changed
+ * and then determine the new vlan inside the if block, in
+ * order to avoid duplicating code for adding the new filter
+ * then deleting the old filter.
+ */
+ if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!vlan_filters && f->vlan == 0)) {
+ /* Determine the new vlan we will be adding */
+ if (vsi->info.pvid)
+ new_vlan = vsi->info.pvid;
+ else if (vlan_filters)
+ new_vlan = 0;
+ else
+ new_vlan = I40E_VLAN_ANY;
-/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be removed
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
- *
- * Removes a given MAC address from a VSI, regardless of VLAN
- *
- * Returns 0 for success, or error
- **/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
-{
- struct i40e_mac_filter *f = NULL;
- int changed = 0;
+ /* Create the new filter */
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (is_vf == f->is_vf) &&
- (is_netdev == f->is_netdev)) {
- f->counter--;
- f->changed = true;
- changed = 1;
+ /* Put the replacement filter into the add list */
+ hash_del(&add_head->hlist);
+ hlist_add_head(&add_head->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
}
}
- if (changed) {
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
- return 0;
- }
- return -ENOENT;
+
+ vsi->has_vlan_filter = !!vlan_filters;
+
+ return 0;
}
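/* A worked example of rules (a)-(c) above, assuming a VSI without a PVID:
 * when the first VLAN filter (say VLAN 5) arrives, vlan_filters becomes
 * nonzero, so an existing MAC filter at VLAN=-1 (I40E_VLAN_ANY) is replaced
 * by the same MAC at VLAN=0 (untagged only) and the old entry is queued on
 * tmp_del_list. When the last VLAN filter is later removed, vlan_filters
 * drops back to zero and the VLAN=0 filter is converted back to VLAN=-1 so
 * that tagged and untagged frames both match again.
 */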
/**
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
*
- * Some older firmware configurations set up a default promiscuous vlan
- * filter that needs to be removed.
+ * Remove whatever filter the firmware set up so the driver can manage
+ * its own filtering intelligently.
**/
-static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
- return -EINVAL;
+ return;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* Ignore error returns, some firmware does it this way... */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
+ /* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (ret)
- return -ENOENT;
-
- return 0;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ f = i40e_find_filter(vsi, macaddr, vlan);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
- goto add_filter_out;
+ return NULL;
+
+ /* Update the boolean indicating if we need to function in
+ * vlan mode.
+ */
+ if (vlan >= 0)
+ vsi->has_vlan_filter = true;
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
- f->changed = true;
+ /* If we're in overflow promisc mode, set the state directly
+ * to failed, so we don't bother to try sending the filter
+ * to the hardware.
+ */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ f->state = I40E_FILTER_FAILED;
+ else
+ f->state = I40E_FILTER_NEW;
- INIT_LIST_HEAD(&f->list);
- list_add_tail(&f->list, &vsi->mac_filter_list);
- }
+ INIT_HLIST_NODE(&f->hlist);
- /* increment counter and add a new flag if needed */
- if (is_vf) {
- if (!f->is_vf) {
- f->is_vf = true;
- f->counter++;
- }
- } else if (is_netdev) {
- if (!f->is_netdev) {
- f->is_netdev = true;
- f->counter++;
- }
- } else {
- f->counter++;
- }
+ key = i40e_addr_to_hkey(macaddr);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
- /* changed tells sync_filters_subtask to
- * push the filter down to the firmware
- */
- if (f->changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
-add_filter_out:
+ /* If we're asked to add a filter that has been marked for removal, it
+ * is safe to simply restore it to active state. __i40e_del_filter
+ * will have deleted any filters which were previously marked NEW or
+ * FAILED, so if it is currently marked REMOVE it must have previously
+ * been ACTIVE. Since we haven't yet run the sync filters task, just
+ * restore this filter to the ACTIVE state so that the sync task
+ * leaves it in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE)
+ f->state = I40E_FILTER_ACTIVE;
+
return f;
}
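/* The filter life cycle implied above, in sketch form (transitions
 * paraphrased from this patch): a filter starts as NEW, moves to ACTIVE or
 * FAILED once the sync task has talked to firmware, and is marked REMOVE by
 * __i40e_del_filter() (below) until the sync task deletes it from hardware.
 * NEW and FAILED entries deleted before a sync are freed immediately, since
 * the firmware never saw them; re-adding a REMOVE-marked filter before a
 * sync simply restores it to ACTIVE, as handled just above.
 */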
+/**
+ * __i40e_del_filter - Remove a specific filter from the VSI
+ * @vsi: VSI to remove from
+ * @f: the filter to remove from the list
+ *
+ * Call this instead of i40e_del_filter only when you already have a pointer
+ * to the exact filter you want to remove, e.g. one obtained via
+ * i40e_find_filter or i40e_find_mac.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+{
+ if (!f)
+ return;
+
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ /* this one never got added by the FW. Just remove it,
+ * no need to sync anything.
+ */
+ hash_del(&f->hlist);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
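/* A minimal usage sketch for the helper above: the "safe" iterator is
 * required because __i40e_del_filter() may hash_del() and free the current
 * entry. This is essentially what i40e_del_mac_filter() further down does;
 * the wrapper name here is illustrative. Caller must hold
 * mac_filter_hash_lock.
 */
static void example_flush_macaddr(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr))
			__i40e_del_filter(vsi, f);
	}
}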
+
/**
* i40e_del_filter - Remove a mac/vlan filter from the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure it's a VF filter, else doesn't matter
- * @is_netdev: make sure it's a netdev filter, else doesn't matter
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
**/
-void i40e_del_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
if (!vsi || !macaddr)
return;
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
- if (!f || f->counter == 0)
- return;
+ f = i40e_find_filter(vsi, macaddr, vlan);
+ __i40e_del_filter(vsi, f);
+}
- if (is_vf) {
- if (f->is_vf) {
- f->is_vf = false;
- f->counter--;
- }
- } else if (is_netdev) {
- if (f->is_netdev) {
- f->is_netdev = false;
- f->counter--;
- }
- } else {
- /* make sure we don't remove a filter in use by VF or netdev */
- int min_f = 0;
+/**
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ *
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
+ * unique vlan that already exists. If a PVID has been assigned, instead only
+ * add the macaddr to that VLAN.
+ *
+ * Returns last filter added on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr)
+{
+ struct i40e_mac_filter *f, *add = NULL;
+ struct hlist_node *h;
+ int bkt;
- min_f += (f->is_vf ? 1 : 0);
- min_f += (f->is_netdev ? 1 : 0);
+ if (vsi->info.pvid)
+ return i40e_add_filter(vsi, macaddr,
+ le16_to_cpu(vsi->info.pvid));
- if (f->counter > min_f)
- f->counter--;
+ if (!i40e_is_vsi_in_vlan(vsi))
+ return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add = i40e_add_filter(vsi, macaddr, f->vlan);
+ if (!add)
+ return NULL;
}
- /* counter == 0 tells sync_filters_subtask to
- * remove the filter from the firmware's list
- */
- if (f->counter == 0) {
- f->changed = true;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return add;
+}
+
+/**
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ *
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (ether_addr_equal(macaddr, f->macaddr)) {
+ __i40e_del_filter(vsi, f);
+ found = true;
+ }
}
+
+ if (found)
+ return 0;
+ else
+ return -ENOENT;
}
/**
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct sockaddr *addr = p;
- struct i40e_mac_filter *f;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ i40e_add_mac_filter(vsi, addr->sa_data);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
- if (ret) {
- netdev_info(netdev,
- "Addr change for Main VSI failed: %d\n",
- ret);
- return -EADDRNOTAVAIL;
- }
- }
-
- if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, netdev->dev_addr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
-
- if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
- struct i40e_aqc_add_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, hw->mac.addr);
- element.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
- i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
- false, false);
- if (f)
- f->is_laa = true;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (ret)
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- /* In MFP case we can have a much lower count of msix
- * vectors available and so we need to lower the used
- * q count.
- */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
- else
- qcount = vsi->alloc_queue_pairs;
+ qcount = vsi->alloc_queue_pairs;
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
- if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
int pow, num_qps;
switch (vsi->type) {
ctxt->info.valid_sections |= cpu_to_le16(sections);
}
+/**
+ * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee that the hash lock is
+ * held.
+ */
+static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (i40e_add_mac_filter(vsi, addr))
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+/**
+ * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee that the hash lock is
+ * held.
+ */
+static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ i40e_del_mac_filter(vsi, addr);
+
+ return 0;
+}
+
/**
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
- struct netdev_hw_addr *uca;
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- struct netdev_hw_addr *mca;
-#else
- struct dev_mc_list *mca;
-#endif /* NETDEV_HW_ADDR_T_MULTICAST */
- struct netdev_hw_addr *ha;
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- /* add addr if not already in the filter list */
- netdev_for_each_uc_addr(uca, netdev) {
- if (!i40e_find_mac(vsi, uca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, uca->addr,
- false, true);
- else
- i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
- false, true);
- }
- }
-
- netdev_for_each_mc_addr(mca, netdev) {
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- if (!i40e_find_mac(vsi, mca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, mca->addr,
- false, true);
- else
- i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
- false, true);
- }
-#else
- if (!i40e_find_mac(vsi, mca->dmi_addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, mca->dmi_addr,
- false, true);
- else
- i40e_add_filter(vsi, mca->dmi_addr,
- I40E_VLAN_ANY, false, true);
- }
-#endif /* NETDEV_HW_ADDR_T_MULTICAST */
- }
- /* remove filter if not in netdev list */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- if (!f->is_netdev)
- continue;
-
- netdev_for_each_mc_addr(mca, netdev)
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- if (ether_addr_equal(mca->addr, f->macaddr))
-#else
- if (ether_addr_equal(mca->dmi_addr, f->macaddr))
-#endif
- goto bottom_of_search_loop;
+ __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
+ __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
- netdev_for_each_uc_addr(uca, netdev)
- if (ether_addr_equal(uca->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- for_each_dev_addr(netdev, ha)
- if (ether_addr_equal(ha->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
-
-bottom_of_search_loop:
- continue;
- }
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
}
/**
- * i40e_mac_filter_entry_clone - Clones a MAC filter entry
- * @src: source MAC filter entry to be clones
+ * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
*
- * Returns the pointer to newly cloned MAC filter entry or NULL
- * in case of error
+ * MAC filter entries from the list were slated to be sent to firmware,
+ * either for addition or deletion.
**/
-static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
- struct i40e_mac_filter *src)
+static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
struct i40e_mac_filter *f;
+ struct hlist_node *h;
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return NULL;
- *f = *src;
+ hlist_for_each_entry_safe(f, h, from, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
- INIT_LIST_HEAD(&f->list);
-
- return f;
+ /* Move the element back into MAC filter list*/
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ }
}
/**
- * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
- * @from: Pointer to list which contains MAC filter entries - changes to
- * those entries needs to be undone.
+ * i40e_update_filter_state - Update filter state based on return data
+ * from firmware
+ * @count: Number of filters added
+ * @add_list: return data from fw
+ * @add_head: pointer to first filter in current batch
*
- * MAC filter entries from list were slated to be removed from device.
+ * MAC filter entries from the list were slated to be added to the device.
+ * Returns the number of filters that were successfully added. Note that 0
+ * does NOT mean success!
**/
-static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
- struct list_head *from)
+static int
+i40e_update_filter_state(int count,
+ struct i40e_aqc_add_macvlan_element_data *add_list,
+ struct i40e_mac_filter *add_head)
{
- struct i40e_mac_filter *f, *ftmp;
+ int retval = 0;
+ int i;
- list_for_each_entry_safe(f, ftmp, from, list) {
- f->changed = true;
- /* Move the element back into MAC filter list*/
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ for (i = 0; i < count; i++) {
+ /* Always check status of each filter. We don't need to check
+ * the firmware return status because we pre-set the filter
+ * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
+ * request to the adminq. Thus, if it no longer matches then
+ * we know the filter is active.
+ */
+ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
+ add_head->state = I40E_FILTER_FAILED;
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
+ }
+
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
+
+ return retval;
}
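/* A sketch of the pre-set trick described above: the caller presumably
 * primes each AdminQ element with the error code while building add_list,
 * so firmware's per-filter overwrite of match_method doubles as a success
 * flag, e.g.:
 *
 *	add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
 */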
/**
- * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
- * @from: Pointer to list which contains MAC filter entries - changes to
- * those entries needs to be undone.
- *
- * MAC filter entries from list were slated to be added from device.
- **/
-static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+ * i40e_aqc_del_filters - Request firmware to delete a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @num_del: the number of filters to delete
+ * @retval: Set to -EIO on failure to delete
+ *
+ * Send a request to firmware via AdminQ to delete a set of filters. Uses
+ * *retval instead of a return value so that success does not force *retval to
+ * be set to 0. This ensures that a sequence of calls to this function
+ * preserve the previous value of *retval on successful delete.
+ */
+static
+void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_remove_macvlan_element_data *list,
+ int num_del, int *retval)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
+ int aq_err;
+
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (f->changed == false && f->counter != 0)
- f->changed = true;
+ /* Explicitly ignore and do not report when firmware returns ENOENT */
+ if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ *retval = -EIO;
+ dev_info(&vsi->back->pdev->dev,
+ "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+ vsi_name, i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
}
}
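/* Design note on the *retval convention above: because a successful delete
 * leaves *retval untouched, a caller can issue several batched calls (for
 * example one per full buffer and one for the remainder, as done further
 * down in i40e_sync_vsi_filters) and still report the first -EIO it hit,
 * rather than having a later success overwrite the error.
 */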
/**
- * i40e_cleanup_add_list - Deletes the element from add list and release
- * memory
- * @from: Pointer to list which contains MAC filter entries
+ * i40e_aqc_add_filters - Request firmware to add a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @add_head: Position in the add hlist
+ * @num_add: the number of filters to add
+ * @promisc_changed: set to true on exit if promiscuous mode was forced on
+ *
+ * Send a request to firmware via AdminQ to add a chunk of filters. Will set
+ * promisc_changed to true if the firmware has run out of space for more
+ * filters.
+ */
+static
+void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_add_macvlan_element_data *list,
+ struct i40e_mac_filter *add_head,
+ int num_add, bool *promisc_changed)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ int aq_err, fcnt;
+
+ i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add, list, add_head);
+
+ if (fcnt != num_add) {
+ *promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err),
+ vsi_name);
+ }
+}
+
+/**
+ * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
+ * @vsi: pointer to the VSI
+ * @vsi_name: name to display in messages
+ * @f: filter data
+ *
+ * This function sets or clears the promiscuous broadcast flags for VLAN
+ * filters in order to properly receive broadcast frames. Assumes that only
+ * broadcast filters are passed.
**/
-static void i40e_cleanup_add_list(struct list_head *add_list)
+static
+void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
{
- struct i40e_mac_filter *f, *ftmp;
+ bool enable = f->state == I40E_FILTER_NEW;
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
- list_for_each_entry_safe(f, ftmp, add_list, list) {
- list_del(&f->list);
- kfree(f);
+ if (f->vlan == I40E_VLAN_ANY) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw,
+ vsi->seid,
+ enable,
+ NULL);
+ } else {
+ aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
+ vsi->seid,
+ enable,
+ f->vlan,
+ NULL);
+ }
+
+ if (aq_ret) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s setting broadcast promiscuous mode on %s\n",
+ i40e_aq_str(hw, hw->aq.asq_last_status),
+ vsi_name);
+ f->state = I40E_FILTER_FAILED;
+ } else if (enable) {
+ f->state = I40E_FILTER_ACTIVE;
}
}
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct list_head tmp_del_list, tmp_add_list;
- struct i40e_mac_filter *f, *ftmp, *fclone;
- bool promisc_forced_on = false;
- bool add_happened = false;
+ struct hlist_head tmp_add_list, tmp_del_list;
+ struct i40e_mac_filter *f, *add_head = NULL;
+ struct i40e_hw *hw = &vsi->back->hw;
+ unsigned int failed_filters = 0;
+ unsigned int vlan_filters = 0;
+ bool promisc_changed = false;
+ char vsi_name[16] = "PF";
int filter_list_len = 0;
- u32 changed_flags = 0;
i40e_status aq_ret = 0;
- bool err_cond = false;
- int retval = 0;
+ u32 changed_flags = 0;
+ struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
- int aq_err = 0;
+ int retval = 0;
u16 cmd_flags;
+ int list_size;
+ int bkt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_del_list);
- INIT_LIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_del_list);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
+ else if (vsi->type != I40E_VSI_MAIN)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ /* Create a list of filters to delete. */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE) {
+ /* Move the element into temporary del_list */
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
- if (f->counter != 0)
+ /* Avoid counting removed filters */
continue;
- f->changed = false;
-
- /* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
- }
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
-
- if (f->counter == 0)
- continue;
- f->changed = false;
-
- /* Clone MAC filter entry and add into temporary list */
- fclone = i40e_mac_filter_entry_clone(f);
- if (!fclone) {
- err_cond = true;
- break;
}
- list_add_tail(&fclone->list, &tmp_add_list);
- }
+ if (f->state == I40E_FILTER_NEW) {
+ /* Move the element into temporary add_list */
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_add_list);
+ }
- /* if failed to clone MAC filter entry - undo */
- if (err_cond) {
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
+ /* Count the number of active (current and new) VLAN
+ * filters we have now. Does not count filters which
+ * are marked for deletion.
+ */
+ if (f->vlan > 0)
+ vlan_filters++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (err_cond) {
- i40e_cleanup_add_list(&tmp_add_list);
- retval = -ENOMEM;
- goto out;
- }
+ retval = i40e_correct_mac_vlan_filters(vsi,
+ &tmp_add_list,
+ &tmp_del_list,
+ vlan_filters);
+ if (retval)
+ goto err_no_memory_locked;
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
/* Now process 'del_list' outside the lock */
- if (!list_empty(&tmp_del_list)) {
- int del_list_size;
-
- filter_list_len = pf->hw.aq.asq_buf_size /
+ if (!hlist_empty(&tmp_del_list)) {
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list_size = filter_list_len *
+ list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kzalloc(del_list_size, GFP_ATOMIC);
- if (!del_list) {
- i40e_cleanup_add_list(&tmp_add_list);
-
- /* Undo VSI's MAC filter entry element updates */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- retval = -ENOMEM;
- goto out;
- }
+ del_list = kzalloc(list_size, GFP_ATOMIC);
+ if (!del_list)
+ goto err_no_memory;
- list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
+ hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
cmd_flags = 0;
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag instead of deleting a MAC filter.
+ */
+ if (is_broadcast_ether_addr(f->macaddr)) {
+ i40e_aqc_broadcast_filter(vsi, vsi_name, f);
+
+ hlist_del(&f->hlist);
+ kfree(f);
+ continue;
+ }
+
/* add to delete list */
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
- del_list[num_del].vlan_tag =
- CPU_TO_LE16((u16)(f->vlan ==
- I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ del_list[num_del].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ del_list[num_del].vlan_tag =
+ CPU_TO_LE16((u16)(f->vlan));
+ }
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid, del_list, num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
+ memset(del_list, 0, list_size);
num_del = 0;
- memset(del_list, 0, del_list_size);
-
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
- retval = -EIO;
- dev_err(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
}
/* Release memory for MAC filter entries which were
* synced up with HW.
*/
- list_del(&f->list);
+ hlist_del(&f->hlist);
kfree(f);
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
- del_list, num_del, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- num_del = 0;
-
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
}
kfree(del_list);
del_list = NULL;
}
- if (!list_empty(&tmp_add_list)) {
- int add_list_size;
-
- /* do all the adds now */
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list_size = filter_list_len *
+ if (!hlist_empty(&tmp_add_list)) {
+ /* Do all the adds now. */
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
- add_list = kzalloc(add_list_size, GFP_ATOMIC);
- if (!add_list) {
- /* Purge element from temporary lists */
- i40e_cleanup_add_list(&tmp_add_list);
-
- /* Undo add filter entries from VSI MAC filter list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_add_filter_entries(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- retval = -ENOMEM;
- goto out;
- }
+ list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(list_size, GFP_ATOMIC);
+ if (!add_list)
+ goto err_no_memory;
+
+ num_add = 0;
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ f->state = I40E_FILTER_FAILED;
+ continue;
+ }
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag instead of adding a MAC filter.
+ */
+ if (is_broadcast_ether_addr(f->macaddr)) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+ i40e_aqc_broadcast_filter(vsi, vsi_name, f);
- add_happened = true;
- cmd_flags = 0;
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ continue;
+ }
/* add to add array */
+ if (num_add == 0)
+ add_head = f;
+ cmd_flags = 0;
ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- add_list[num_add].vlan_tag =
- CPU_TO_LE16(
- (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ add_list[num_add].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ add_list[num_add].vlan_tag =
+ CPU_TO_LE16((u16)(f->vlan));
+ }
add_list[num_add].queue_number = 0;
-
+ /* set invalid match method for later detection */
+ add_list[num_add].match_method =
+ CPU_TO_LE16((u16)I40E_AQC_MM_ERR_NO_RES);
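+ /* Sketch of the intent, as implied by the comment above: an
+ * entry the firmware accepts gets its match_method overwritten
+ * in the write-back, so one still reading
+ * I40E_AQC_MM_ERR_NO_RES afterwards can be marked failed when
+ * the add helper updates filter state.
+ */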
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
add_list[num_add].flags = CPU_TO_LE16(cmd_flags);
num_add++;
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ i40e_aqc_add_filters(vsi, vsi_name, add_list,
+ add_head, num_add,
+ &promisc_changed);
+ memset(add_list, 0, list_size);
num_add = 0;
-
- if (aq_ret)
- break;
- memset(add_list, 0, add_list_size);
}
- /* Entries from tmp_add_list were cloned from MAC
- * filter list, hence clean those cloned entries
- */
- list_del(&f->list);
- kfree(f);
}
-
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- num_add = 0;
+ i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
+ num_add, &promisc_changed);
+ }
+ /* Now move all of the filters from the temp add list back to
+ * the VSI's list.
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
add_list = NULL;
+ }
- if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
- retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
- dev_info(&pf->pdev->dev,
- "add filter failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
- !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
- promisc_forced_on = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
- }
- }
+ /* Determine the number of active and failed filters. */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->state == I40E_FILTER_ACTIVE)
+ vsi->active_filters++;
+ else if (f->state == I40E_FILTER_FAILED)
+ failed_filters++;
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* If promiscuous mode has changed, we need to calculate a new
+ * threshold for when we are safe to exit
+ */
+ if (promisc_changed)
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+ /* Check if we are able to exit overflow promiscuous mode. We can
+ * safely exit if we didn't just enter, we no longer have any failed
+ * filters, and we have reduced filters below the threshold value.
+ */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ !promisc_changed && !failed_filters &&
+ (vsi->active_filters < vsi->promisc_threshold)) {
+ dev_info(&pf->pdev->dev,
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ promisc_changed = true;
+ vsi->promisc_threshold = 0;
+ }
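+ /* Illustrative numbers: if 32 filters were active when overflow
+ * promiscuous was entered, promisc_threshold becomes
+ * (32 * 3) / 4 = 24, so the mode is exited only once active
+ * filters drop below 24 and none are failed.
+ */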
/* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set multi promisc failed on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ if ((changed_flags & IFF_PROMISC) || promisc_changed) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and vlan
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ if (cur_promisc)
+ aq_ret =
+ i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret =
+ i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+ }
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- &vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
+ hw,
+ vsi->seid,
+ cur_promisc, NULL,
+ true);
if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ retval =
+ i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set unicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- &vsi->back->hw,
- vsi->seid,
- cur_promisc,
- NULL);
+ hw,
+ vsi->seid,
+ cur_promisc, NULL);
if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ retval =
+ i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set multicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return retval;
+
+err_no_memory:
+ /* Restore elements on the temporary add and delete lists */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+err_no_memory_locked:
+ i40e_undo_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_filter_entries(vsi, &tmp_add_list);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
}
/**
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
return -EINVAL;
+#ifndef HAVE_NDO_FEATURES_CHECK
+ /* MTU < 576 causes problems with TSO */
+ if (new_mtu < 576) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ } else {
+#ifdef HAVE_NDO_SET_FEATURES
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ if (netdev->wanted_features & NETIF_F_TSO)
+ netdev->features |= NETIF_F_TSO;
+ if (netdev->wanted_features & NETIF_F_TSO6)
+ netdev->features |= NETIF_F_TSO6;
+#else
+ if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO)
+ netdev->features |= NETIF_F_TSO;
+ if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO6)
+ netdev->features |= NETIF_F_TSO6;
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#else
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+#endif /* HAVE_NDO_SET_FEATURES */
+ }
+#else
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+#endif /* ! HAVE_NDO_FEATURES_CHECK */
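+/* Summary of the compat ladder above: kernels with ndo_features_check can
+ * drop TSO per packet, so TSO/TSO6 stay enabled regardless of MTU; older
+ * kernels instead have the driver toggle NETIF_F_TSO/NETIF_F_TSO6 itself
+ * around the 576-byte boundary, honoring wanted_features where the kernel
+ * (or the RHEL6 extended netdev) exposes it.
+ */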
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
"update vlan stripping failed, err %s aq_err %s\n",
i40e_stat_str(&vsi->back->hw, ret),
i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ vsi->back->hw.aq.asq_last_status));
}
}
}
/**
- * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
* @vsi: the vsi being configured
* @vid: vlan id to be added (0 = untagged only, -1 = any)
+ *
+ * This is a helper function for adding a new MAC/VLAN filter with the
+ * specified VLAN for each existing MAC address already in the hash table.
+ * This function does *not* perform any accounting to update filters based on
+ * vlan mode.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
- bool is_netdev, is_vf;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(vsi->netdev);
-
- /* Locked once because all functions invoked below iterates list*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- if (is_netdev) {
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add vlan filter %d for %pM\n",
- vid, vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ struct hlist_node *h;
+ int bkt;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
- /* Now if we add a vlan tag, make sure to check if it is the first
- * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
- * with 0, so we now accept untagged and specified tagged traffic
- * (and not any taged and untagged)
- */
- if (vid > 0) {
- if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
- }
+ return 0;
+}
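+
+/* Callers hold the hash lock across a batch of these helpers; a minimal
+ * sketch mirroring i40e_vsi_add_vlan below:
+ *
+ *     spin_lock_bh(&vsi->mac_filter_hash_lock);
+ *     err = i40e_add_vlan_all_mac(vsi, vid);
+ *     spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ */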
- /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
- if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev))
- continue;
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
- }
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
+{
+ int err;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (!(vid > 0) || vsi->info.pvid)
+ return -EINVAL;
+
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ err = i40e_add_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (err)
+ return err;
/* schedule our worker thread which will take care of
* applying the new filter changes
}
/**
- * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
* @vsi: the vsi being configured
* @vid: vlan id to be removed (0 = untagged only, -1 = any)
*
- * Return: 0 on success or negative otherwise
- **/
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+ * This function should be used to remove all VLAN filters which match the
+ * given VID. It does not schedule the service event and does not take the
+ * mac_filter_hash_lock so it may be combined with other operations under
+ * a single invocation of the mac_filter_hash_lock.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
- struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *add_f;
- bool is_vf, is_netdev;
- int filter_count = 0;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(netdev);
-
- /* Locked once because all functions invoked below iterates list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- if (is_netdev)
- i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
-
- list_for_each_entry(f, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
-
- /* go through all the filters for this VSI and if there is only
- * vid == 0 it means there are no other filters, so vid 0 must
- * be replaced with -1. This signifies that we should from now
- * on accept any traffic (with any tag present, or untagged)
- */
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (is_netdev) {
- if (f->vlan &&
- ether_addr_equal(netdev->dev_addr, f->macaddr))
- filter_count++;
- }
-
- if (f->vlan)
- filter_count++;
- }
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
- if (!filter_count && is_netdev) {
- i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
- f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->vlan == vid)
+ __i40e_del_filter(vsi, f);
}
+}
- if (!filter_count) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
- }
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed
+ **/
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
+{
+ if (!(vid > 0) || vsi->info.pvid)
+ return;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_rm_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
i40e_service_event_schedule(vsi->back);
- return 0;
}
/**
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
-#ifndef HAVE_VLAN_RX_REGISTER
int ret = 0;
-#endif
- if (vid > 4095)
+ if (vid >= VLAN_N_VID)
#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
return -EINVAL;
#else
return;
#endif
- netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
-
/* If the network stack called us with vid = 0 then
* it is asking to receive priority tagged packets with
* vlan id 0. Our HW receives them by default when configured
* to receive untagged packets, so we do not need to add an
* extra filter for vlan 0 tagged packets.
*/
if (vid)
-#ifndef HAVE_VLAN_RX_REGISTER
- ret =
-#endif
- i40e_vsi_add_vlan(vsi, vid);
+ ret = i40e_vsi_add_vlan(vsi, vid);
#ifndef HAVE_VLAN_RX_REGISTER
- if (!ret && (vid < VLAN_N_VID))
+ if (!ret)
set_bit(vid, vsi->active_vlans);
-#endif /* HAVE_VLAN_RX_REGISTER */
+#endif /* !HAVE_VLAN_RX_REGISTER */
#ifndef HAVE_NETDEV_VLAN_FEATURES
/* Copy feature flags from netdev to the vlan netdev for this vid.
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
-
/* return code is ignored as there is nothing a user
* can do about failure to remove and a log message was
* already printed from the other function
struct i40e_q_vector *q_vector = vsi->q_vectors[i];
q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
wr32(hw, I40E_PFINT_RATEN(vector - 1),
- INTRL_USEC_TO_REG(vsi->int_rate_limit));
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
/* set the ITR configuration */
q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
return IRQ_HANDLED;
}
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+/**
+ * i40e_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(notify, struct i40e_q_vector, affinity_notify);
+
+ q_vector->affinity_mask = *mask;
+}
+
+/**
+ * i40e_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40e_irq_affinity_release(struct kref *ref) {}
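+
+/* The empty release is deliberate as far as this driver is concerned:
+ * q_vectors are allocated and freed by the driver itself, so there is
+ * nothing extra to drop when the notifier is torn down.
+ */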
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
+
/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
+ int irq_num;
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+ irq_num = pf->msix_entries[base + vector].vector;
+
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
/* skip this unused q_vector */
continue;
}
- err = request_irq(pf->msix_entries[base + vector].vector,
+ err = request_irq(irq_num,
vsi->irq_handler,
0,
q_vector->name,
"MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
+
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
+ q_vector->affinity_notify.release = i40e_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+#endif
#ifdef HAVE_IRQ_AFFINITY_HINT
/* assign the mask for this irq */
- irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
- &q_vector->affinity_mask);
-#endif /* HAVE_IRQ_AFFINITY_HINT */
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+#endif
}
vsi->irqs_ready = true;
free_queue_irqs:
while (vector) {
vector--;
+ irq_num = pf->msix_entries[base + vector].vector;
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+ irq_set_affinity_notifier(irq_num, NULL);
+#endif
#ifdef HAVE_IRQ_AFFINITY_HINT
- irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
- NULL);
+ irq_set_affinity_hint(irq_num, NULL);
#endif
- free_irq(pf->msix_entries[base + vector].vector,
- &(vsi->q_vectors[vector]));
+ free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
}
}
/**
- * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * i40e_vsi_start_rings - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
int ret = 0;
/* do rx first for enable and last for disable */
- if (request) {
- ret = i40e_vsi_control_rx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_tx(vsi, request);
- } else {
- /* Ignore return value, we need to shutdown whatever we can */
- i40e_vsi_control_tx(vsi, request);
- i40e_vsi_control_rx(vsi, request);
- }
+ ret = i40e_vsi_control_rx(vsi, true);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, true);
return ret;
}
+/**
+ * i40e_vsi_stop_rings - Stop a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
+{
+ /* Stop Tx first and Rx last, the reverse of the bring-up order.
+ * Ignore the return values; we need to shut down whatever we can.
+ */
+ i40e_vsi_control_tx(vsi, false);
+ i40e_vsi_control_rx(vsi, false);
+}
+
/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
- u16 vector = i + base;
+ int irq_num;
+ u16 vector;
+
+ vector = i + base;
+ irq_num = pf->msix_entries[vector].vector;
/* free only the irqs that were actually requested */
if (!vsi->q_vectors[i] ||
!vsi->q_vectors[i]->num_ringpairs)
continue;
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+#endif
#ifdef HAVE_IRQ_AFFINITY_HINT
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(pf->msix_entries[vector].vector,
- NULL);
+ irq_set_affinity_hint(irq_num, NULL);
#endif
- synchronize_irq(pf->msix_entries[vector].vector);
- free_irq(pf->msix_entries[vector].vector,
- vsi->q_vectors[i]);
+ synchronize_irq(irq_num);
+ free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
+ int i, tc_unused = 0;
u8 num_tc = 0;
- int i;
+ u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
- * and use the traffic class index to get the
- * number of traffic classes enabled
+ * and create a bitmask of enabled TCs
*/
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- if (dcbcfg->etscfg.prioritytable[i] > num_tc)
- num_tc = dcbcfg->etscfg.prioritytable[i];
- }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
- /* Traffic class index starts from zero so
- * increment to return the actual count
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
*/
- return num_tc + 1;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
}
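+
+/* Worked example (illustrative mapping, not from the source): a priority
+ * table assigning UPs to TCs {0,0,1,1,2,2,0,0} builds bitmask 0x7 and
+ * returns 3; a table yielding 0x5 (TC1 unused) hits the non-contiguous
+ * case, logs the error and falls back to a single TC.
+ */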
/**
else
return 1;/* Only TC0 */
- /* At least have TC0 */
- enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i))
num_tc++;
return num_tc;
}
-/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
/* Get the VSI level BW configuration per TC */
ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %s aq_err %s\n",
bw_data.tc_bw_credits[i] = bw_share[i];
ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
+ NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
"Update vsi tc config failed, err %s aq_err %s\n",
i40e_stat_str(&vsi->back->hw, ret),
i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
"Failed updating vsi bw info, err %s aq_err %s\n",
i40e_stat_str(&vsi->back->hw, ret),
i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
- /* Enable DCB tagging only when more than one TC */
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
- "DCBX offload is supported for this PF.\n");
+ "DCBX offload is supported for this PF.\n");
}
} else {
dev_info(&pf->pdev->dev,
*/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
+ enum i40e_aq_link_speed new_speed;
char *speed = "Unknown ";
char *fc = "Unknown";
+ char *fec = "";
+ char *an = "";
+
+ new_speed = vsi->back->hw.phy.link_info.link_speed;
- if (vsi->current_isup == isup)
+ if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
vsi->current_isup = isup;
+ vsi->current_speed = new_speed;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ fec = ", FEC: None";
+ an = ", Autoneg: False";
+
+ if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = ", Autoneg: True";
+
+ if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = ", FEC: CL74 FC-FEC/BASE-R";
+ else if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_RS_ENA)
+ fec = ", FEC: CL108 RS-FEC";
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
+ speed, fec, an, fc);
}
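+
+/* With the 25G fields filled in, the resulting log line looks roughly
+ * like: "NIC Link is Up, 25 Gbps Full Duplex, FEC: CL108 RS-FEC,
+ * Autoneg: True, Flow Control: None".
+ */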
/**
i40e_configure_msi_and_legacy(vsi);
/* start rings */
- err = i40e_vsi_control_rings(vsi, true);
+ err = i40e_vsi_start_rings(vsi);
if (err)
return err;
(!(pf->hw.phy.link_info.an_info &
I40E_AQ_QUALIFIED_MODULE)))
netdev_err(vsi->netdev,
- "the driver failed to link because an unqualified module was detected.");
+ "the driver failed to link because an unqualified module was detected.");
}
/* replay flow filters */
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
usleep_range(1000, 2000);
i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The
- * two second wait is based upon the watchdog cycle in
- * the VF driver.
- */
- if (vsi->type == I40E_VSI_SRIOV)
- msleep(2000);
i40e_up(vsi);
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
netif_tx_disable(vsi->netdev);
}
i40e_vsi_disable_irq(vsi);
- i40e_vsi_control_rings(vsi, false);
+ i40e_vsi_stop_rings(vsi);
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
#ifdef HAVE_VXLAN_RX_OFFLOAD
-#ifdef HAVE_VXLAN_CHECKS
-#if IS_ENABLED(CONFIG_VXLAN)
- vxlan_get_rx_port(netdev);
-#endif
-#else
#if IS_ENABLED(CONFIG_VXLAN)
vxlan_get_rx_port(netdev);
#endif
-#endif /* HAVE_VXLAN_CHECKS */
#endif /* HAVE_VXLAN_RX_OFFLOAD */
#ifdef HAVE_GENEVE_RX_OFFLOAD
#if IS_ENABLED(CONFIG_GENEVE)
if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
geneve_get_rx_port(netdev);
#endif
-#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+ udp_tunnel_get_rx_info(netdev);
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
return 0;
}
i40e_cleanup_flex_filter(pf);
}
+/**
+ * i40e_cloud_filter_exit - Cleans up Cloud Filters
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist which keeps all the Cloud Filters.
+ **/
+static void i40e_cloud_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_cloud_filter *cfilter;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &pf->cloud_filter_list, cloud_node) {
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ }
+ pf->num_cloud_filters = 0;
+}
+
/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
}
}
-/**
- * i40e_service_event_complete - Finish up the service event
- * @pf: board private structure
- **/
-static void i40e_service_event_complete(struct i40e_pf *pf)
-{
- WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
-
- /* flush memory to make sure state is correct before next watchdog */
- smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
-}
-
/**
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
- I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
return fcnt_prog;
}
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR */
+
+ /* Wait for some more space to be available to turn on ATR. We also
+ * must check that no existing ntuple rules for TCP are in effect
+ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp_rule == 0)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
int fd_room;
int reg;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
}
pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
- wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ wr32(&pf->hw, I40E_PFQF_CTL_1,
+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
i40e_flush(&pf->hw);
pf->fd_flush_cnt++;
pf->fd_add_err = 0;
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
/* We can see up to 256 filter programming desc in transit if the filters are
* being applied really fast; before we see the first
- * filter miss error on rx queue 0. Accumulating enough error messages before
+ * filter miss error on Rx queue 0. Accumulating enough error messages before
* reacting will make sure we don't cause flush too often.
*/
#define I40E_MAX_FD_PROGRAM_ERROR 256
if (test_bit(__I40E_DOWN, &pf->state))
return;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
status = i40e_get_link_status(&pf->hw, &new_link);
+
+ /* On success, disable temp link polling */
+ if (status == I40E_SUCCESS)
+ if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
+ pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
if (status != I40E_SUCCESS) {
+ /* Enable link polling temporarily until i40e_get_link_status
+ * returns I40E_SUCCESS
+ */
+ pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
return;
/* make sure we don't do these things too often */
- if (time_before(jiffies,
- (pf->service_timer_previous + pf->service_timer_period)))
+ if (time_before(jiffies, (pf->service_timer_previous +
+ pf->service_timer_period)))
return;
pf->service_timer_previous = jiffies;
- if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
"capability discovery failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
/* find existing VSI and see if it needs configuring */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
/* create a new VSI if none exists */
if (!vsi) {
* @pf: board private structure
**/
static void i40e_fdir_teardown(struct i40e_pf *pf)
-{
- int i;
-
- i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_release(pf->vsi[i]);
- break;
- }
- }
+{
+ struct i40e_vsi *vsi;
+
+ i40e_fdir_filter_exit(pf);
+ i40e_cloud_filter_exit(pf);
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_release(vsi);
}
/**
ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
if (ret)
dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
* The FW can still send Flow control frames if enabled.
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
- pf->main_vsi_seid);
+ pf->main_vsi_seid);
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
I40E_GL_MDET_TX_EVENT_SHIFT;
u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT) -
- pf->hw.func_caps.base_queue;
+ pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
event, queue, pf_num, vf_num);
I40E_GL_MDET_RX_EVENT_SHIFT;
u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT) -
- pf->hw.func_caps.base_queue;
+ pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i);
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i);
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
}
if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
i40e_flush(hw);
}
-#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
/**
* i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- __be16 port;
+ u16 port;
int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].index;
+ port = pf->udp_ports[i].port;
if (port)
- ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- pf->udp_ports[i].type,
- NULL, NULL);
+ ret = i40e_aq_add_udp_tunnel(hw, port,
+ pf->udp_ports[i].type,
+ NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
dev_info(&pf->pdev->dev,
"%s vxlan port %d, index %d failed, err %s aq_err %s\n",
port ? "add" : "delete",
- ntohs(port), i,
+ port, i,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].index = 0;
+ pf->udp_ports[i].port = 0;
} else {
if (port)
dev_dbg(&pf->pdev->dev,
"add vxlan port %d, index %d success\n",
- ntohs(port), i);
+ port, i);
else
dev_dbg(&pf->pdev->dev,
"delete vxlan port success\n");
}
}
-#endif /* CONFIG_GENEVE */
-#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
+#endif /* CONFIG_GENEVE || CONFIG_VXLAN || HAVE_UDP_ENC_RX_OFFLOAD */
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
/**
* i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
/* don't bother with service tasks if a reset is in progress */
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
test_bit(__I40E_SUSPENDED, &pf->state)) {
- i40e_service_event_complete(pf);
return;
}
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ return;
+
i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
-#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
i40e_sync_udp_filters_subtask(pf);
#endif
-#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
i40e_clean_adminq_subtask(pf);
- i40e_service_event_complete(pf);
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
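+
+ /* __I40E_SERVICE_SCHED now doubles as a "task in flight" latch:
+ * test_and_set_bit() at entry bails out if another run is active,
+ * and the clear above (after the barrier) reopens it, replacing
+ * the old i40e_service_event_complete() helper.
+ */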
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
vsi->alloc_queue_pairs = pf->num_lan_qps;
vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
vsi->num_q_vectors = pf->num_lan_msix;
+ }
else
vsi->num_q_vectors = 1;
set_bit(__I40E_DOWN, &vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
- vsi->rx_itr_setting = pf->rx_itr_default;
- vsi->tx_itr_setting = pf->tx_itr_default;
vsi->int_rate_limit = 0;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
- pf->rss_table_size : 64;
+ pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
- INIT_LIST_HEAD(&vsi->mac_filter_list);
+ hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
/* Initialize VSI lock */
- spin_lock_init(&vsi->mac_filter_list_lock);
+ spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ tx_ring->tx_itr_setting = pf->tx_itr_default;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
rx_ring->count = vsi->num_desc;
rx_ring->size = 0;
rx_ring->dcb_tc = 0;
+ rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
/**
* i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
* @pf: board private structure
- * @vectors: the number of MSI-X vectors to request
+ * @v_budget: the number of MSI-X vectors to request
*
* Returns the number of vectors reserved, or error
**/
-static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int v_budget)
{
- int err = 0;
-
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- }
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
+ int v_actual = 0;
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ v_actual = pci_enable_msix_range(pf->pdev,
+ pf->msix_entries,
+ I40E_MIN_MSIX,
+ v_budget);
+ if (v_actual < 0)
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
- vectors = 0;
- }
+ "MSI-X vector reservation failed: %d\n", v_actual);
- return vectors;
+ return v_actual;
}
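+
+/* pci_enable_msix_range() returns the number of vectors actually granted,
+ * anywhere in [I40E_MIN_MSIX, v_budget], or a negative errno on failure,
+ * which is why the old retry loop around pci_enable_msix() could go away.
+ */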
/**
static int i40e_init_msix(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
int vectors_left;
int v_budget, i;
int v_actual;
vectors_left--;
}
- /* reserve vectors for the main PF traffic queues */
- pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
vectors_left -= pf->num_lan_msix;
- v_budget += pf->num_lan_msix;
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
vectors_left -= vmdq_vecs;
}
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
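+
+ /* Illustrative arithmetic: with 16 online CPUs and 30 vectors left
+ * after the non-queue causes, pass one grants min(16, 30 / 2) = 15
+ * LAN vectors; if 7 vectors remain once FD and VMDq take theirs,
+ * pass two adds min(16 - 15, 7) = 1 more, so num_lan_msix reaches
+ * the CPU count without starving the other features.
+ */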
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
if (!pf->msix_entries)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
+ if (v_actual < I40E_MIN_MSIX) {
+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ return -ENODEV;
+ }
if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
pf->num_vmdq_msix = 0;
}
- if (v_actual < I40E_MIN_MSIX) {
- pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
- return -ENODEV;
-
- } else if (v_actual == I40E_MIN_MSIX) {
+ if (v_actual == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
}
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
- (pf->num_vmdq_msix == 0)) {
+ (pf->num_vmdq_msix == 0)) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
return ret;
}
}
-
return ret;
}
if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
return 0;
- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
/* Use the user configured hash keys and lookup table if there is one,
* otherwise use default
*/
u32 *seed_dw = (u32 *)seed;
if (vsi->type == I40E_VSI_MAIN) {
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
- seed_dw[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} else if (vsi->type == I40E_VSI_SRIOV) {
for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HKEY1(i, vf_id),
- seed_dw[i]);
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
}
if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
return -EINVAL;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HLUT1(i, vf_id),
- lut_dw[i]);
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
*/
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size)
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
{
u16 i;
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
i40e_pf_config_rss(pf);
}
- dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
- pf->alloc_rss_size, pf->rss_size_max);
+ dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
+ vsi->req_queue_pairs, pf->rss_size_max);
return pf->alloc_rss_size;
}
| I40E_FLAG_WB_ON_ITR_CAPABLE
| I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
| I40E_FLAG_NO_PCI_LINK_CHECK
- | I40E_FLAG_100M_SGMII_CAPABLE
| I40E_FLAG_USE_SET_LLDP_MIB
- | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
+ | I40E_FLAG_PTP_L4_CAPABLE;
#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
/* supports mpls header skip and csum for following headers */
pf->flags |= I40E_FLAG_MPLS_HDR_OFFLOAD_CAPABLE;
} else {
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))) {
pf->flags |= I40E_FLAG_RESTART_AUTONEG;
}
/* Disable FW LLDP if FW < v4.3 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4)))
pf->flags |= I40E_FLAG_STOP_FW_LLDP;
/* Use the FW Set LLDP MIB API if FW > v4.40 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
(pf->hw.aq.fw_maj_ver >= 5)))
pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
need_reset = true;
i40e_fdir_filter_exit(pf);
+ i40e_cloud_filter_exit(pf);
}
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
}
return need_reset;
}
#ifdef HAVE_NDO_SET_FEATURES
+/**
+ * i40e_clear_rss_lut - clear the rx hash lookup table
+ * @vsi: the VSI being configured
+ **/
+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
+ u8 i;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), 0);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
+}
+
/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
struct i40e_pf *pf = vsi->back;
bool need_reset;
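+	/* Toggling RXHASH on reprograms RSS; toggling it off clears the
+	 * RSS lookup table.
+	 */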
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ i40e_pf_config_rss(pf);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ i40e_clear_rss_lut(vsi);
+
#ifdef NETIF_F_HW_VLAN_CTAG_RX
if (features & NETIF_F_HW_VLAN_CTAG_RX)
#else
}
#endif /* HAVE_NDO_SET_FEATURES */
-#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded Rx UDP port
* @pf: board private structure
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].index == port)
+ if (pf->udp_ports[i].port == port)
return i;
}
return i;
}
-#endif
-#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
-#if defined(HAVE_VXLAN_RX_OFFLOAD)
-#if IS_ENABLED(CONFIG_VXLAN)
/**
- * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+__maybe_unused
+static void i40e_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
u8 next_idx;
u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "vxlan port %d already offloaded\n",
- ntohs(port));
+ netdev_info(netdev, "port %d already offloaded\n", port);
return;
}
next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
- ntohs(port));
+ netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
+ port);
+ return;
+ }
+
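+	/* Map the stack's tunnel type onto the admin queue tunnel type;
+	 * GENEVE is only honoured when firmware advertises the offload.
+	 */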
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+ break;
+ default:
return;
}
/* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
/**
- * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+__maybe_unused
+static void i40e_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
- } else {
- netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
- ntohs(port));
+ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
+ goto not_found;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
+ goto not_found;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
+ goto not_found;
+ break;
+ default:
+ goto not_found;
}
+
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->udp_ports[idx].port = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ return;
+not_found:
+ netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
+ port);
+}
+
+#if defined(HAVE_VXLAN_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN)
+/**
+ * i40e_add_vxlan_port - Get notifications about vxlan ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that vxlan is notifying us about
+ * @port: New UDP port number that vxlan started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct udp_tunnel_info ti = {
+ .type = UDP_TUNNEL_TYPE_VXLAN,
+ .sa_family = sa_family,
+ .port = port,
+ };
+
+ i40e_udp_tunnel_add(netdev, &ti);
}
+/**
+ * i40e_del_vxlan_port - Get notifications about vxlan ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that vxlan is notifying us about
+ * @port: UDP port number that vxlan stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct udp_tunnel_info ti = {
+ .type = UDP_TUNNEL_TYPE_VXLAN,
+ .sa_family = sa_family,
+ .port = port,
+ };
+
+ i40e_udp_tunnel_del(netdev, &ti);
+}
#endif /* CONFIG_VXLAN */
#endif /* HAVE_VXLAN_RX_OFFLOAD */
#if defined(HAVE_GENEVE_RX_OFFLOAD)
static void i40e_add_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 next_idx;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "udp port %d already offloaded\n",
- ntohs(port));
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
-
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
- ntohs(port));
- return;
- }
-
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ struct udp_tunnel_info ti = {
+ .type = UDP_TUNNEL_TYPE_GENEVE,
+ .sa_family = sa_family,
+ .port = port,
+ };
- dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
+ i40e_udp_tunnel_add(netdev, &ti);
}
-/**
+/*
* i40e_del_geneve_port - Get notifications about GENEVE ports that go away
* @netdev: This physical port's netdev
* @sa_family: Socket Family that GENEVE is notifying us about
static void i40e_del_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ struct udp_tunnel_info ti = {
+ .type = UDP_TUNNEL_TYPE_GENEVE,
+ .sa_family = sa_family,
+ .port = port,
+ };
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
- ntohs(port));
- } else {
- netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
- ntohs(port));
- }
+ i40e_udp_tunnel_del(netdev, &ti);
}
#endif /* CONFIG_GENEVE */
#ifdef HAVE_FDB_OPS
#ifdef USE_CONST_DEV_UC_CHAR
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr,
+ struct net_device *dev,
+ const unsigned char *addr,
#ifdef HAVE_NDO_FDB_ADD_VID
- u16 vid,
+ u16 vid,
#endif
- u16 flags)
+ u16 flags)
#else
static int i40e_ndo_fdb_add(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr,
+ struct net_device *dev,
+ unsigned char *addr,
#ifdef HAVE_NDO_FDB_ADD_VID
- u16 vid,
+ u16 vid,
#endif
- u16 flags)
+ u16 flags)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(dev);
}
#ifdef HAVE_NDO_FEATURES_CHECK
-/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
- * inner mac plus all inner ethertypes.
- */
-#define I40E_MAX_TUNNEL_HDR_LEN 128
/**
* i40e_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
struct net_device *dev,
netdev_features_t features)
{
- if (!skb->encapsulation)
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
- /* prevent tunnel headers that are too long to offload from
- * being sent to the hardware
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
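+	/* Each length check below uses "len & ~(N * sz)", which is nonzero
+	 * when len exceeds N units of sz bytes or is not a multiple of sz,
+	 * so a single test rejects oversized and misaligned headers alike.
+	 */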
+ /* MACLEN can support at most 63 words */
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(63 * 2))
+ goto out_err;
+
+ /* IPLEN and EIPLEN can support at most 127 dwords */
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+
+ if (skb->encapsulation) {
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(127 * 2))
+ goto out_err;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+ }
+
+	/* No need to validate L4LEN as TCP is the only protocol with a
+	 * flexible value and we support all possible values supported
+	 * by TCP, which is at most 15 dwords
*/
- if (skb_inner_network_header(skb) - skb_transport_header(skb) >
- I40E_MAX_TUNNEL_HDR_LEN)
- return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
+out_err:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
#endif /* HAVE_NDO_FEATURES_CHECK */
#ifdef USE_CONST_DEV_UC_CHAR
#ifdef HAVE_NDO_FDB_ADD_VID
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- const unsigned char *addr,
- u16 vid)
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid)
#else
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- const unsigned char *addr)
+ struct net_device *dev,
+ const unsigned char *addr)
#endif
#else
#ifdef HAVE_NDO_FDB_ADD_VID
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr,
- u16 vid)
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 vid)
#else
static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr)
+ struct net_device *dev,
+ unsigned char *addr)
#endif
#endif
{
.ndo_fcoe_enable = i40e_fcoe_enable,
.ndo_fcoe_disable = i40e_fcoe_disable,
#endif
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the
+ * function get_ndo_ext to retrieve offsets for extended fields from within the
+ * net_device_ops struct and ndo_size is checked to determine whether or not
+ * the offset is valid.
+ */
+ .ndo_size = sizeof(const struct net_device_ops),
+#endif
#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+ .extended.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
+#else
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
-#endif
+#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
+#endif /* HAVE_NDO_SET_VF_TRUST */
#endif /* IFLA_VF_MAX */
#ifdef HAVE_VXLAN_RX_OFFLOAD
#if IS_ENABLED(CONFIG_VXLAN)
.ndo_del_geneve_port = i40e_del_geneve_port,
#endif
#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+ .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
+ .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+#endif
#ifdef HAVE_NDO_GET_PHYS_PORT_ID
.ndo_get_phys_port_id = i40e_get_phys_port_id,
#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
struct net_device *netdev;
+ u8 broadcast[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
int etherdev_size;
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- u32 hw_features;
-#endif
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
etherdev_size = sizeof(struct i40e_netdev_priv);
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
-#ifdef HAVE_ENCAP_CSUM_OFFLOAD
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
+
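+	/* Build the feature set once; it seeds hw_enc_features,
+	 * vlan_features and the user-visible hw_features below, keeping
+	 * all three consistent across the kernel-version ifdefs.
+	 */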
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+#ifdef NETIF_F_IPV6_CSUM
+ NETIF_F_IPV6_CSUM |
+#endif
+ NETIF_F_HIGHDMA |
+#ifdef NETIF_F_SOFT_FEATURES
+ NETIF_F_SOFT_FEATURES |
+#endif
+ NETIF_F_TSO |
#ifdef HAVE_ENCAP_TSO_OFFLOAD
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
#ifdef HAVE_GRE_ENCAP_OFFLOAD
- NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE |
+#ifdef NETIF_F_GSO_IPXIP4
+ NETIF_F_GSO_IPXIP4 |
+#else
+#ifdef NETIF_F_GSO_IPIP
+ NETIF_F_GSO_IPIP |
+#endif
+#ifdef NETIF_F_GSO_SIT
+ NETIF_F_GSO_SIT |
#endif
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- 0;
-#else /* HAVE_ENCAP_TSO_OFFLOAD */
- NETIF_F_SG;
+#endif
+#endif
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
#endif /* HAVE_ENCAP_TSO_OFFLOAD */
-#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
+ NETIF_F_SCTP_CRC |
+#ifdef NETIF_F_RXHASH
+ NETIF_F_RXHASH |
+#endif
+#ifdef HAVE_NDO_SET_FEATURES
+ NETIF_F_RXCSUM |
+#endif
+ 0;
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_HIGHDMA |
-#ifdef HAVE_ENCAP_TSO_OFFLOAD
- NETIF_F_GSO_UDP_TUNNEL |
-#ifdef HAVE_GRE_ENCAP_OFFLOAD
- NETIF_F_GSO_GRE |
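+	/* NETIF_F_GSO_UDP_TUNNEL_CSUM was set above (under
+	 * HAVE_ENCAP_TSO_OFFLOAD), so the XOR below clears it when the MAC
+	 * cannot checksum the outer UDP header.
+	 */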
+ if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+ netdev->hw_enc_features |= hw_enc_features;
#endif
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= hw_enc_features;
#endif
+
+ /* copy netdev features into list of user selectable features */
+ hw_features = hw_enc_features |
#ifdef NETIF_F_HW_VLAN_CTAG_RX
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
#else /* NETIF_F_HW_VLAN_CTAG_RX */
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER |
-#endif /* NETIF_F_HW_VLAN_CTAG_RX */
-#ifdef NETIF_F_IPV6_CSUM
- NETIF_F_IPV6_CSUM |
-#endif
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
-#ifdef HAVE_NDO_SET_FEATURES
- NETIF_F_RXCSUM |
-#endif
-#ifdef NETIF_F_RXHASH
- NETIF_F_RXHASH |
-#endif /* NETIF_F_RXHASH */
- 0;
-#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX;
+#endif /* !NETIF_F_HW_VLAN_CTAG_RX */
+#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->features |= NETIF_F_NTUPLE;
+ hw_features |= NETIF_F_NTUPLE;
#endif
- if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
#ifdef HAVE_NDO_SET_FEATURES
- /* copy netdev features into list of user selectable features */
#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- hw_features = get_netdev_hw_features(netdev);
- hw_features |= netdev->features;
+ hw_features |= get_netdev_hw_features(netdev);
set_netdev_hw_features(netdev, hw_features);
#else
- netdev->hw_features |= netdev->features;
+ netdev->hw_features |= hw_features;
#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
#else
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_FILTER;
+#endif
+
+#ifndef HAVE_NDO_SET_FEATURES
#ifdef NETIF_F_GRO
netdev->features |= NETIF_F_GRO;
-#endif /* NETIF_F_GRO */
-#endif /* HAVE_NDO_SET_FEATURES */
+#endif
+#endif
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr,
- I40E_VLAN_ANY, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, mac_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, mac_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ /* Add the broadcast filter so that we initially will receive
+ * broadcast packets. Note that when a new VLAN is first added the
+ * driver will convert all filters marked I40E_VLAN_ANY into VLAN
+ * specific filters as part of transitioning into "vlan" operation.
+ * When more VLANs are added, the driver will copy each existing MAC
+ * filter and add it for the new VLAN.
+ *
+ * Broadcast filters are handled specially by
+	 * i40e_sync_filters_subtask, as the driver must set the broadcast
+ * promiscuous bit instead of adding this directly as a MAC/VLAN
+ * filter. The subtask will update the correct broadcast promiscuous
+ * bits as VLANs become active or inactive.
+ */
+ eth_broadcast_addr(broadcast);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, broadcast);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
#ifdef ETHTOOL_GPERMADDR
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- u8 laa_macaddr[ETH_ALEN];
- bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
"couldn't get PF vsi config, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
/* MFP mode setup queue map and update VSI */
if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
- !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
+ !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
"add vsi failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
"Note: VSI source pruning is not being set correctly by FW\n");
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ vsi->active_filters = 0;
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- f->changed = true;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ f->state = I40E_FILTER_NEW;
f_count++;
-
- /* Expected to have only one MAC filter entry for LAA in list */
- if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- ether_addr_copy(laa_macaddr, f->macaddr);
- found_laa_mac_filter = true;
- }
- }
- spin_unlock_bh(&vsi->mac_filter_list_lock);
-
- if (found_laa_mac_filter) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, laa_macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
-
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- laa_macaddr, NULL);
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
**/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
struct i40e_veb *veb = NULL;
struct i40e_pf *pf;
u16 uplink_seid;
- int i, n;
+ int i, n, bkt;
pf = vsi->back;
i40e_vsi_disable_irq(vsi);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan,
- f->is_vf, f->is_netdev);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* clear the sync flag on all filters */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
+
+ /* make sure any remaining filters are marked for deletion */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_sync_vsi_filters(vsi);
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */
ret = i40e_alloc_rings(vsi);
return NULL;
}
#ifdef HAVE_BRIDGE_ATTRIBS
- /* We come up by default in VEPA mode if sriov is not
+ /* We come up by default in VEPA mode if SRIOV is not
* already enabled, in which case we can't force VEPA
* mode.
*/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool is_default = pf->cur_promisc;
bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
int ret;
- /* get a VEB from the hardware */
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default,
+ veb->enabled_tc, false,
&veb->seid, enable_stats, NULL);
+
+ /* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %s aq_err %s\n",
"get switch config failed err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
* support limited promisc or true promisc
* when user requests promisc. The default is limited
* promisc.
- */
+ */
+
if ((pf->hw.pf_id == 0) &&
!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
"couldn't set switch config bits, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
}
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
/* set up the spinlocks for the AQ, do this only once in probe
err = i40e_init_shared_code(hw);
if (err) {
- dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err);
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
goto err_pf_reset;
}
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_DCB */
err = i40e_set_fc(hw, &set_fc_aq_fail, true);
if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_phy_cap\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ "Set fc with err %s aq_err %s on get_phy_cap\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on set_phy_config\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ "Set fc with err %s aq_err %s on set_phy_config\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_link_info\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ "Set fc with err %s aq_err %s on get_link_info\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
- /* Devices on the IOSF bus do not have this information right
+ /* Devices on the IOSF bus do not have this information
* and will report PCI Gen 1 x 1 by default so don't bother
* checking them.
*/
char width[PCI_WIDTH_SIZE] = "Unknown";
/* Get the negotiated link width and speed from PCI config
- * space */
+ * space
+ */
pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
&link_status);
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- pf->hw.phy.phy_types = LE32_TO_CPU(abilities.phy_type);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
* The FW can still send Flow control frames if enabled.
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
- pf->main_vsi_seid);
+ pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
-
+ pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
+ if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
+ pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;
/* print a string summarizing features */
i40e_print_features(pf);
unmap:
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the Admin Queue resources: %d\n",
- ret_code);
+ i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
i40e_destroy_spinlock_d(&hw->aq.arq_spinlock);
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
i40e_stop_misc_vector(pf);
-
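+	/* Free the misc vector's IRQ for suspend; the resume path requests
+	 * it again before the reset/rebuild.
+	 */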
+ if (pf->msix_entries) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
retval = pci_save_state(pdev);
if (retval)
return retval;
if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
clear_bit(__I40E_DOWN, &pf->state);
rtnl_lock();
+ if (pf->msix_entries) {
+ err = request_irq(pf->msix_entries[0].vector,
+ i40e_intr, 0, pf->int_name, pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "request_irq for %s failed: %d\n",
+ pf->int_name, err);
+ }
+ }
i40e_reset_and_rebuild(pf, false);
rtnl_unlock();
}
i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
- /* we will see if single thread per module is enough for now,
- * it can't be any worse than using the system workqueue which
- * was already single threaded
+ /* There is no need to throttle the number of active tasks because
+ * each device limits its own task using a state bit for scheduling
+ * the service task, and the device tasks do not interfere with each
+ * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
+ * since we need to be able to guarantee forward progress even under
+ * memory pressure.
*/
- i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset,
- u16 *data)
+static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
{
i40e_status ret_code = I40E_SUCCESS;
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset,
- u16 *words, u16 *data)
+static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
{
i40e_status ret_code = I40E_SUCCESS;
return ret_code;
}
-/**
- * __i40e_write_nvm_word - Writes Shadow RAM word
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to write
- * @data: word to write to the Shadow RAM
- *
- * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
- * NVM ownership have to be acquired and released (on ARQ completion event
- * reception) by caller. To commit SR to NVM update checksum function
- * should be called.
- **/
-i40e_status __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
- void *data)
-{
- *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
-
- /* Value 0x00 below means that we treat SR as a flat mem */
- return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
-}
-
-/**
- * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
- * @hw: pointer to the HW structure
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset of the Shadow RAM buffer to write
- * @words: number of words to write
- * @data: words to write to the Shadow RAM
- *
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
- * NVM ownership must be acquired before calling this function and released
- * on ARQ completion event reception by caller. To commit SR to NVM update
- * checksum function should be called.
- **/
-i40e_status __i40e_write_nvm_buffer(struct i40e_hw *hw,
- u8 module_pointer, u32 offset,
- u16 words, void *data)
-{
- __le16 *le_word_ptr = (__le16 *)data;
- u16 *word_ptr = (u16 *)data;
- u32 i = 0;
-
- for (i = 0; i < words; i++)
- le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
-
- /* Here we will only write one buffer as the size of the modules
- * mirrored in the Shadow RAM is always less than 4K.
- */
- return i40e_write_nvm_aq(hw, module_pointer, offset, words,
- data, false);
-}
-
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
**/
-i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
i40e_status ret_code = I40E_SUCCESS;
struct i40e_virt_mem vmem;
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+	/* Clear the error status even if it was not read, and log that */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* actual OS primitives
*/
-#define hw_dbg(h, s, ...) do { \
- pr_debug("i40e %02x.%x " s, \
- (h)->bus.device, (h)->bus.func, \
- ##__VA_ARGS__); \
+#define hw_dbg(h, s, ...) do { \
+ pr_debug("i40e %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
} while (0)
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
-#define i40e_debug(h, m, s, ...) \
-do { \
- if (((m) & (h)->debug_mask)) \
- pr_info("i40e %02x.%x " s, \
- (h)->bus.device, (h)->bus.func, \
- ##__VA_ARGS__); \
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
} while (0)
/* these things are all directly replaced with sed during the kernel build */
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/* adminq functions */
i40e_status i40e_init_adminq(struct i40e_hw *hw);
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
-i40e_status i40e_init_asq(struct i40e_hw *hw);
-i40e_status i40e_init_arq(struct i40e_hw *hw);
-i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
-i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
-i40e_status i40e_shutdown_asq(struct i40e_hw *hw);
-i40e_status i40e_shutdown_arq(struct i40e_hw *hw);
-u16 i40e_clean_asq(struct i40e_hw *hw);
-void i40e_free_adminq_asq(struct i40e_hw *hw);
-void i40e_free_adminq_arq(struct i40e_hw *hw);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
bool atomic_reset);
i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_mac_config(struct i40e_hw *hw,
- u16 max_frame_size, bool crc_en, u16 pacing,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
- u64 *advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_partner_advt(struct i40e_hw *hw,
- u64 *advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool rx_only_promisc);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
struct i40e_asq_cmd_details *cmd_details,
u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_aqc_switch_resource_alloc_element_resp *buf,
u16 count,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
- u16 mac_seid, u16 vsi_seid,
- u16 *ret_seid);
-i40e_status i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
- u16 vsi_seid, u16 tag, u16 queue_num,
- u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
- u16 tag, u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
- u16 etag, u8 num_tags_in_buf, void *buf,
- u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
- u16 etag, u16 *tags_used, u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
- u16 old_tag, u16 new_tag, u16 *tags_used,
- u16 *tags_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
- u16 vlan_id, u16 *stat_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
- u16 vlan_id, u16 stat_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_port_parameters(struct i40e_hw *hw,
- u16 bad_frame_vsi, bool save_bad_pac,
- bool pad_short_pac, bool double_vlan,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
- u8 tcmap, bool request, u8 *tcmap_ret,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
- struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
-i40e_status i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
- u32 addr, u32 dw_count, void *buffer);
-i40e_status i40e_aq_alternate_write(struct i40e_hw *hw,
- u32 reg_addr0, u32 reg_val0,
- u32 reg_addr1, u32 reg_val1);
-i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
- u32 addr, u32 dw_count, void *buffer);
-i40e_status i40e_aq_alternate_clear(struct i40e_hw *hw);
-i40e_status i40e_aq_alternate_write_done(struct i40e_hw *hw,
- u8 bios_mode, bool *reset_needed);
-i40e_status i40e_aq_set_oem_mode(struct i40e_hw *hw,
- u8 oem_mode);
-
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
#ifdef I40E_FCOE
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
#endif
-enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
u32 offset, u16 words, void *data,
bool last_command);
-i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data);
-i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-i40e_status __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
- void *data);
-i40e_status __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
- u32 offset, u16 words, void *data);
-i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now;
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now);
timespec64_add_ns(&now, delta);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
static int i40e_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_write(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
return -EOPNOTSUPP;
}
+/**
+ * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * @pf: the PF data structure
+ *
+ * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
+ * for noticed latch events. This allows the driver to keep track of the first
+ * time a latch event was noticed which will be used to help clear out Rx
+ * timestamps for packets that got dropped or lost.
+ *
+ * This function will return the current value of I40E_PRTTSYN_STAT_1 and is
+ * expected to be called only while under the ptp_rx_lock.
+ **/
+static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat, new_latch_events;
+ int i;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
+
+ /* Update the jiffies time for any newly latched timestamp. This
+ * ensures that we store the time that we first discovered a timestamp
+ * was latched by the hardware. The service task will later determine
+ * if we should free the latch and drop that timestamp should too much
+ * time pass. This flow ensures that we only update jiffies for new
+ * events latched since the last time we checked, and not all events
+ * currently latched, so that the service task accounting remains
+ * accurate.
+ */
+ for (i = 0; i < 4; i++) {
+ if (new_latch_events & BIT(i))
+ pf->latch_events[i] = jiffies;
+ }
+
+ /* Finally, we store the current status of the Rx timestamp latches */
+ pf->latch_event_flags = prttsyn_stat;
+
+ return prttsyn_stat;
+}
+
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct i40e_ring *rx_ring;
- unsigned long rx_event;
- u32 prttsyn_stat;
- int n;
+ int i;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- /* Unless all four receive timestamp registers are latched, we are not
- * concerned about a possible PTP Rx hang, so just update the timeout
- * counter and exit.
- */
- if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
- I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
- I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
- I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
- I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
- pf->last_rx_ptp_check = jiffies;
- return;
- }
+ /* Update current latch times for Rx events */
+ i40e_ptp_get_rx_events(pf);
- /* Determine the most recent watchdog or rx_timestamp event. */
- rx_event = pf->last_rx_ptp_check;
- for (n = 0; n < vsi->num_queue_pairs; n++) {
- rx_ring = vsi->rx_rings[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
+ /* Check all the currently latched Rx events and see whether they have
+ * been latched for over a second. It is assumed that any timestamp
+ * should have been cleared within this time, or else it was captured
+ * for a dropped frame that the driver never received. Thus, we will
+ * clear any timestamp that has been latched for over 1 second.
+ */
+ for (i = 0; i < 4; i++) {
+ if ((pf->latch_event_flags & BIT(i)) &&
+ time_is_before_jiffies(pf->latch_events[i] + HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_event_flags &= ~BIT(i);
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&pf->pdev->dev,
+ "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
+ i);
+ }
}
- /* Only need to read the high RXSTMP register to clear the lock */
- if (time_is_before_jiffies(rx_event + 5 * HZ)) {
- rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
- pf->last_rx_ptp_check = jiffies;
- pf->rx_hwtstamp_cleared++;
- dev_warn(&vsi->back->pdev->dev,
- "clearing PTP Rx timestamp hang\n");
- }
+ spin_unlock_bh(&pf->ptp_rx_lock);
}
/**
hw = &pf->hw;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- if (!(prttsyn_stat & BIT(index)))
+ /* Get current Rx events and update latch times */
+ prttsyn_stat = i40e_ptp_get_rx_events(pf);
+
+ /* TODO: Should we warn about missing Rx timestamp event? */
+ if (!(prttsyn_stat & BIT(index))) {
+ spin_unlock_bh(&pf->ptp_rx_lock);
return;
+ }
+
+ /* Clear the latched event since we're about to read its register */
+ pf->latch_event_flags &= ~BIT(index);
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
+ /* fall through */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
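+		/* L2-only timestamping works on all MACs; UDP (L4) matching
+		 * is enabled below only when the hardware supports it, and
+		 * the reported filter is narrowed to L2 otherwise.
+		 */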
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
- I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
- I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) {
+ tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ } else {
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ }
break;
case HWTSTAMP_FILTER_ALL:
default:
}
/* Clear out all 1588-related registers to clear and unlatch them. */
+ spin_lock_bh(&pf->ptp_rx_lock);
rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
/* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
return;
}
- /* we have to initialize the spinlock first, since we can't control
- * when the user will enter the PHC device entry points
- */
- spin_lock_init(&pf->tmreg_lock);
+ mutex_init(&pf->tmreg_lock);
+ spin_lock_init(&pf->ptp_rx_lock);
/* ensure we have a clock device */
err = i40e_ptp_create_clock(pf);
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
+
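/* As a usage sketch: i40e_program_fdir_filter() below is the consumer
 * of this helper in this patch. It emits the programming descriptor
 * via i40e_fdir() and then queues a dummy raw-packet descriptor,
 * which is why it waits for two free descriptors before proceeding.
 */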
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add)
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
{
- struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
- unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
u32 td_cmd = 0;
- u16 delay = 0;
u16 i;
/* find existing FDIR VSI */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
- vsi = pf->vsi[i];
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
if (!vsi)
return -ENOENT;
dev = tx_ring->dev;
/* we need two descriptors to add/del a filter and we can wait */
- do {
- if (I40E_DESC_UNUSED(tx_ring) > 1)
- break;
+ for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
msleep_interruptible(1);
- delay++;
- } while (delay < I40E_FD_CLEAN_DELAY);
-
- if (!(I40E_DESC_UNUSED(tx_ring) > 1))
- return -EAGAIN;
+ }
dma = dma_map_single(dev, raw_packet,
I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
/* grab the next descriptor */
i = tx_ring->next_to_use;
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
- memset(first, 0, sizeof(struct i40e_tx_buffer));
-
- tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
-
- fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
- fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
- I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
- fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
- I40E_TXD_FLTR_QW0_PCTYPE_MASK;
-
- /* Use LAN VSI Id if not programmed by user */
- if (fdir_data->dest_vsi == 0)
- fpt |= (pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
- else
- fpt |= ((u32)fdir_data->dest_vsi <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
- I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
-
- dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- if (add)
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- else
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
-
- dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
- I40E_TXD_FLTR_QW1_DEST_MASK;
-
- dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
- I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
-
- if (fdir_data->cnt_index != 0) {
- dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dcc |= ((u32)fdir_data->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- }
-
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->rsvd = cpu_to_le32(0);
- fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
- fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+ i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
struct iphdr *ip;
bool err = false;
u8 *raw_packet;
- u16 off;
int ret;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
/* For now, supports only word as flex payload */
if (i40e_is_flex_filter(fd_data)) {
+ u16 off, pattern;
+
off = ~(be16_to_cpu(fd_data->flex_mask[3]));
- *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+ pattern = be16_to_cpu(fd_data->flex_bytes[3]);
+ *((u16 *)(raw_packet + off)) = pattern;
}
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
struct iphdr *ip;
bool err = false;
u8 *raw_packet;
- u16 off;
int ret;
/* Dummy packet */
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
/* For now, supports only word as flex payload */
if (i40e_is_flex_filter(fd_data)) {
+ u16 off, pattern;
+
off = ~(be16_to_cpu(fd_data->flex_mask[3]));
- *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+ pattern = be16_to_cpu(fd_data->flex_bytes[3]);
+ *((u16 *)(raw_packet + off)) = pattern;
}
if (add) {
pf->fd_tcp_rule++;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
return err ? -EOPNOTSUPP : 0;
}
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
-{
- return -EOPNOTSUPP;
-}
-
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
* a specific flow spec
struct iphdr *ip;
bool err = false;
u8 *raw_packet;
- u16 off;
int ret;
int i;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
/* For now, supports only word as flex payload */
if (i40e_is_flex_filter(fd_data)) {
+ u16 off, pattern;
+
off = ~(be16_to_cpu(fd_data->flex_mask[3]));
- *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+ pattern = be16_to_cpu(fd_data->flex_bytes[3]);
+ *((u16 *)(raw_packet + off)) = pattern;
}
fd_data->pctype = i;
struct i40e_pf *pf = vsi->back;
int ret;
- switch (input->flow_type & FLOW_TYPE_MASK) {
+ switch (input->flow_type) {
case TCP_V4_FLOW:
ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
break;
case UDP_V4_FLOW:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
- case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
- break;
- case IPV4_FLOW:
- ret = i40e_add_del_fdir_ipv4(vsi, input, add);
- break;
case IP_USER_FLOW:
switch (input->ip4_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
- case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
- break;
- default:
+ case IPPROTO_IP:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
+ default:
+ /* We cannot support masking based on protocol */
+ goto unsupported_flow;
}
break;
default:
+unsupported_flow:
dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
input->flow_type);
ret = -EINVAL;
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->auto_disable_flags &
+ !(pf->hw_disabled_flags &
I40E_FLAG_FD_SB_ENABLED)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
- pf->auto_disable_flags |=
+ pf->hw_disabled_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
}
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- unsigned int j = 0;
-
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40e_get_tx_pending(tx_ring, false);
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
}
#endif /* I40E_ADD_PROBES */
-#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
#if defined(HAVE_SKBUFF_CSUM_LEVEL) || defined(ESX55)
#define I40E_TUNNEL_SUPPORT
#endif
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6;
u32 rx_error, rx_status;
-#ifdef I40E_TUNNEL_SUPPORT
- bool tunnel = false;
-#endif
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
I40E_RXD_QW1_STATUS_SHIFT;
decoded = decode_rx_desc_ptype(ptype);
-#ifdef I40E_TUNNEL_SUPPORT
- if (decoded.known &&
- ((decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP) ||
- (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP) ||
- (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)))
- tunnel = true;
- else
- tunnel = false;
-#ifdef HAVE_SKBUFF_CSUM_LEVEL
-
- skb->encapsulation = tunnel ? 1 : 0;
-#endif
-#endif /* I40E_TUNNEL_SUPPORT */
-
skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb);
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+#ifdef I40E_TUNNEL_SUPPORT
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
#ifdef HAVE_SKBUFF_CSUM_LEVEL
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
#endif
+#endif /* I40E_TUNNEL_SUPPORT */
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
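/* Net effect of the rework above: an encapsulated TCP/UDP/SCTP frame
 * reports CHECKSUM_UNNECESSARY with csum_level = 1 (inner checksum
 * validated), a plain frame keeps csum_level 0, and other inner
 * protocols no longer claim any checksum was verified.
 */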
return;
checksum_fail:
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
+ u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (unlikely(rsyn)) {
- i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (unlikely(tsynvalid))
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
#endif /* HAVE_PTP_1588_CLOCK */
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
skb_record_rx_queue(skb, rx_ring->queue_index);
} else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
- DMA_FROM_DEVICE);
-
rx_buffer->skb = NULL;
}
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
/* pull page into skb */
if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
/* hand second half of page back to the ring */
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
- u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
-
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero
*/
- if (!rx_desc->wb.qword1.status_error_len)
+ if (!i40e_test_staterr(rx_desc,
+ BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
if (i40e_cleanup_headers(rx_ring, skb))
continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
+{
+ return vsi->rx_rings[idx]->rx_itr_setting;
+}
+
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
+{
+ return vsi->tx_rings[idx]->tx_itr_setting;
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
u64 flags = vsi->back->flags;
bool clean_complete = true;
bool arm_wb = false;
- struct i40e_ring *ring;
int budget_per_ring;
int work_done = 0;
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
- cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete &= (budget_per_ring != cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
#ifndef HAVE_NETDEV_NAPI_LIST
#endif
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+ const cpumask_t *aff_mask = &q_vector->affinity_mask;
+ int cpu_id = smp_processor_id();
+
+ /* It is possible that the interrupt affinity has changed but,
+ * if the cpu is pegged at 100%, polling will never exit while
+ * traffic continues and the interrupt will be stuck on this
+ * cpu. We check to make sure affinity is correct before we
+ * continue to poll, otherwise we must stop polling so the
+ * interrupt can move to the correct cpu.
+ */
+ if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
+ !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
+tx_only:
+ if (arm_wb) {
+ q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+ i40e_enable_wb_on_itr(vsi, q_vector);
+ }
+ return budget;
+ }
+#else /* HAVE_IRQ_AFFINITY_NOTIFY */
tx_only:
- if (arm_wb){
+ if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector);
}
return budget;
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
}
if (flags & I40E_TXR_FLAGS_WB_ON_ITR)
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_update_enable_itr(vsi, q_vector);
- } else { /* Legacy mode */
+
+ /* If we're prematurely stopping polling to fix the interrupt
+ * affinity we want to make sure polling starts back up so we
+ * issue a call to i40e_force_wb which triggers a SW interrupt.
+ */
+ if (!clean_complete)
+ i40e_force_wb(vsi, q_vector);
+ else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
i40e_irq_dynamic_enable_icr0(vsi->back, false);
- }
- return 0;
+ else
+ i40e_update_enable_itr(vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
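/* Returning min(work_done, budget - 1) keeps the NAPI contract: a poll
 * routine that has called napi_complete_done() must report strictly
 * less than its budget to signal that polling is finished.
 */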
/**
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
- if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+ (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+ (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
#endif /* HAVE_ENCAP_TSO_OFFLOAD */
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
}
#ifdef HAVE_ENCAP_TSO_OFFLOAD
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+#ifdef NETIF_F_GSO_IPXIP4
+ SKB_GSO_IPXIP4 |
+#else
+#ifdef NETIF_F_GSO_IPIP
+ SKB_GSO_IPIP |
+#endif
+#ifdef NETIF_F_GSO_SIT
+ SKB_GSO_SIT |
+#endif
+#endif
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
- /* remove payload length from the outer checksum.
- * This code works a lot like csum_tcpudp_nofold,
- * and is endian correct, because the ntohs is really
- * just getting us a 0x1 or 0x100, which is then
- * converted by the compiler to a power of two shift
- * when used as a multiplier, which avoids the use of
- * ntohs on a variable, just to convert it back again.
- * This is done to allow the word by word computation
- * of the checksum to be done using the 32 bit scratch
- * area in paylen when computing the checksum. Clear
- * as mud?
- */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* remove payload length from inner checksum, see
- * explanatory comment above
- */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
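/* Both csum_replace_by_diff() calls fold the payload length back out
 * of the pseudo-header checksum, replacing the hand-rolled one's
 * complement arithmetic deleted above; the hardware re-adds each
 * segment's own length when it writes the per-segment checksums.
 */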
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+#ifndef HAVE_NDO_FEATURES_CHECK
+ /* too small a TSO segment size causes problems */
+ if (gso_size < 64) {
+ gso_size = 64;
+ gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64);
+ }
+#endif
+ /* update gso size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
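/* Worked example for the bytecount update (hypothetical numbers): a
 * TSO skb with skb->len = 7340, *hdr_len = 100 and gso_size = 1448
 * splits into five segments, so bytecount becomes
 * 7340 + (5 - 1) * 100 = 7740, i.e. five wire frames of 1548 bytes.
 */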
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
#endif
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
if (*tx_flags & I40E_TX_FLAGS_TSO)
tx_ring->vsi->back->tx_ip4_cso++;
#endif
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
#ifdef I40E_ADD_PROBES
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- tx_ring->vsi->back->tcp_segs += gso_segs;
+ tx_ring->vsi->back->tcp_segs += first->gso_segs;
#endif
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together as they both are checked against
+ * 4 below and at this point desc_count will be used as a boolean value
+ * after this if/else block.
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
- * else (kernel < 3.18)
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * set RS bit on EOP for every packet and update tail
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
#ifdef HAVE_SKB_XMIT_MORE
- if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
- tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
- }
-#else
- if ((tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE) &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)))
- tx_ring->packet_stride++;
- else
- do_rs = true;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
#endif /* HAVE_SKB_XMIT_MORE */
- if (do_rs)
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
+ }
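/* Example flow with xmit_more: every skb in a burst takes the
 * skb->xmit_more branch, arming packet_stride at WB_STRIDE and zeroing
 * desc_count so no tail write occurs; the burst's final skb then sees
 * desc_count |= ++packet_stride reach WB_STRIDE, sets RS, and the
 * single tail write below flushes the whole burst.
 */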
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
#ifdef HAVE_SKB_XMIT_MORE
- if (!tail_bump)
- prefetchw(tx_desc + 1);
-#endif /* HAVE_XMIT_MORE */
+ if (desc_count) {
+ writel(i, tx_ring->tail);
- if (tail_bump) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
*/
- wmb();
- writel(i, tx_ring->tail);
+ mmiowb();
}
+#else
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+#endif /* HAVE_SKB_XMIT_MORE */
return;
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
*/
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+/**
+ * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
+ * @intrl: interrupt rate limit to convert
+ *
+ * This function converts a decimal interrupt rate limit to the appropriate
+ * register format expected by the firmware when setting interrupt rate limit.
+ */
+static inline u16 i40e_intrl_usec_to_reg(int intrl)
+{
+ if (intrl >> 2)
+ return ((intrl >> 2) | INTRL_ENA);
+ else
+ return 0;
+}
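/* Round-trip check with hypothetical values: a 40 usec limit encodes
 * as (40 >> 2) | INTRL_ENA = 0x0a | 0x40 = 0x4a, and
 * INTRL_REG_TO_USEC(0x4a) recovers (0x4a & ~0x40) << 2 = 40. Values
 * that are not multiples of 4 usecs are truncated to the register's
 * 4 usec granularity.
 */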
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
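/* Sanity check of the approximation: a 32768-byte buffer needs
 * DIV_ROUND_UP(32768, 12288) = 3 descriptors, and the shortcut gives
 * ((32768 * 85) >> 20) + 1 = (2785280 >> 20) + 1 = 2 + 1 = 3 as well.
 */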
/* Tx Descriptors needed, worst case */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
u8 atr_sample_rate;
u8 atr_count;
-#ifdef HAVE_PTP_1588_CLOCK
- unsigned long last_rx_timestamp;
-
-#endif /* HAVE_PTP_1588_CLOCK */
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
}
/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
}
/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include "i40e_lan_hmc.h"
#include "i40e_devids.h"
-#define UNREFERENCED_XPARAMETER
-
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
-#define I40E_MAX_PF 16
-#define I40E_MAX_PF_VSI 64
-#define I40E_MAX_PF_QP 128
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
-/* something less than 1 minute */
-#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
-
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Check whether address is multicast. */
-#define is_multicast_ether_addr(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address) \
- ((((u8 *)(address))[0] == ((u8)0xff)) && \
- (((u8 *)(address))[1] == ((u8)0xff)))
-
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
/* Data type manipulation macros. */
-
-#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
-#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
-
-/* Number of Transmit Descriptors must be a multiple of 8. */
-#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
-/* Number of Receive Descriptors must be a multiple of 32 if
- * the number of descriptors is greater than 32.
- */
-#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
-
#define I40E_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
I40E_DEBUG_ALL = 0xFFFFFFFF
};
-/* PCI Bus Info */
-#define I40E_PCI_LINK_STATUS 0xB2
-#define I40E_PCI_LINK_WIDTH 0x3F0
-#define I40E_PCI_LINK_WIDTH_1 0x10
-#define I40E_PCI_LINK_WIDTH_2 0x20
-#define I40E_PCI_LINK_WIDTH_4 0x40
-#define I40E_PCI_LINK_WIDTH_8 0x80
-#define I40E_PCI_LINK_SPEED 0xF
-#define I40E_PCI_LINK_SPEED_2500 0x1
-#define I40E_PCI_LINK_SPEED_5000 0x2
-#define I40E_PCI_LINK_SPEED_8000 0x3
-
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
#define I40E_PHY_LED_MODE_MASK 0xFFFF
#define I40E_PHY_LED_MODE_ORIG 0x80000000
-/* Memory types */
-enum i40e_memset_type {
- I40E_NONDMA_MEM = 0,
- I40E_DMA_MEM
-};
-
-/* Memcpy types */
-enum i40e_memcpy_type {
- I40E_NONDMA_TO_NONDMA = 0,
- I40E_NONDMA_TO_DMA,
- I40E_DMA_TO_DMA,
- I40E_DMA_TO_NONDMA
-};
-
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- u32 phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/*
+ * The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some PHY
+ * types. Bit 31 is unused in the I40E_CAP_PHY_TYPE_* bit fields, but
+ * there is no corresponding gap in the i40e_aq_phy_type enumeration,
+ * so enum values of 31 and above must be shifted up by one. The only
+ * affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
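/* For example, assuming I40E_PHY_TYPE_25GBASE_KR carries the AQ value
 * 0x1f (31), the offset maps it to capability bit 32 instead of the
 * unused bit 31 described above.
 */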
#define I40E_HW_CAP_MAX_GPIO 30
-#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
-#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
-
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
} wb; /* writeback */
};
-#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
-#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
- I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
-#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
-#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
- I40E_RXD_QW0_FCOEINDX_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
-#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST
-#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
- I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
-
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define I40E_RXD_PACKET_TYPE_UNICAST 0
-#define I40E_RXD_PACKET_TYPE_MULTICAST 1
-#define I40E_RXD_PACKET_TYPE_BROADCAST 2
-#define I40E_RXD_PACKET_TYPE_MIRRORED 3
-
#define I40E_RXD_QW1_ERROR_SHIFT 19
#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
-#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
-#define I40E_RX_PTYPE_SHIFT 56
-
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
-#define I40E_RXD_QW1_NEXTP_SHIFT 38
-#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
-
-#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
-#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
- I40E_RXD_QW2_EXT_STATUS_SHIFT)
-
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
-#define I40E_RXD_QW2_L2TAG2_SHIFT 0
-#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
-
-#define I40E_RXD_QW2_L2TAG3_SHIFT 16
-#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
-
enum i40e_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
-#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
-#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
- I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
-
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
-#define I40E_TWO_BIT_MASK 0x3
-#define I40E_THREE_BIT_MASK 0x7
-#define I40E_FOUR_BIT_MASK 0xF
-#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
-
/* TX Descriptor */
struct i40e_tx_desc {
__le64 buffer_addr; /* Address of descriptor's data buf */
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
};
-#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
-#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
-#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
-#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
-
#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-struct i40e_nop_desc {
- __le64 rsvd;
- __le64 dtype_cmd;
-};
-
-#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
-
-#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
-#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
-
-enum i40e_tx_nop_desc_cmd_bits {
- /* Note: These are predefined bit offsets */
- I40E_TX_NOP_DESC_EOP_SHIFT = 0,
- I40E_TX_NOP_DESC_RS_SHIFT = 1,
- I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
-};
-
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
-
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
-#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
-#define I40E_SR_OPTION_ROM_PTR 0x05
-#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
-#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
-#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
-#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
-#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
-#define I40E_SR_EMP_IMAGE_PTR 0x0B
-#define I40E_SR_PE_IMAGE_PTR 0x0C
-#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
-#define I40E_SR_MNG_CONFIG_PTR 0x0E
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
-#define I40E_SR_PXE_SETUP_PTR 0x30
-#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
-#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
-#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
-#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
-#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
-#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
-#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
-#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
-#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
-#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
-#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
-#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
-#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
-#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
-#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
-#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
-#define I40E_SR_BUF_ALIGNMENT 4096
#define I40E_SR_WORDS_IN_1KB 512
/* Checksum should be calculated such that after adding all the words,
* including the checksum word itself, the sum should be 0xBABA.
__le64 difapp_msk_bias;
};
-#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
-#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
- I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
-
enum i40e_fcoe_difdix_ctx_desc_flags_bits {
/* 2 BITS */
I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
};
-#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
-#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
- I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
-
-#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
-#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
- I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
-
-#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
-#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
- I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
-
-#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
-#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
- I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
-
-#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
-#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
- I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
-
-#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
-#define I40E_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \
- I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
-
/* FCoE DIF/DIX Buffers descriptor */
struct i40e_fcoe_difdix_buffers_desc {
__le64 buff_addr0;
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct i40e_virtchnl_pf_event {
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ if (!vsi)
+ return I40E_QUEUE_END_OF_LIST;
+
if (le16_to_cpu(vsi->info.mapping_flags) &
I40E_AQ_VSI_QUE_MAP_NONCONTIG)
pf_queue_id =
u32 qtx_ctl;
int ret = 0;
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ f = i40e_add_mac_filter(vsi,
+ vf->default_lan_addr.addr);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- f = i40e_add_filter(vsi, brdcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ eth_broadcast_addr(broadcast);
+ f = i40e_add_mac_filter(vsi, broadcast);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
}
/* program mac filter */
if (vf->lan_vsi_idx == 0)
goto complete_reset;
- i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ vf->num_vlan = 0;
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- false);
+ i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
/**
* i40e_alloc_vfs
* @pf: pointer to the PF structure
- * @num_alloc_vfs: number of vfs to allocate
+ * @num_alloc_vfs: number of VFs to allocate
*
* allocate VF resources
**/
/**
* i40e_pci_sriov_enable
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
*
* Enable or change the number of VFs
**/
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+	/* if the VF is in base mode, keep only the base capabilities that are
+ * negotiated
+ */
+ if (pf->vf_base_mode_only)
+ vfres->vf_offload_flags &= I40E_VF_BASE_MODE_OFFLOADS;
err:
/* send the response back to the VF */
ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
* i40e_getnum_vf_vsi_vlan_filters
* @vsi: pointer to the vsi
*
- * called to get the number of vlans offloaded on this vf
+ * called to get the number of VLANs offloaded on this VF
**/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0;
+ int num_vlans = 0, bkt;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++;
}
bool allmulti = false;
bool alluni = false;
int aq_err = 0;
+ int bkt;
struct i40e_vsi *vsi;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ !vsi) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
dev_err(&pf->pdev->dev,
- "VF %d doesn't meet requirements to enter promiscuous mode\n",
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ if (pf->vf_base_mode_only)
+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, promiscuous mode is not supported\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ aq_ret = I40E_SUCCESS;
goto error_param;
}
	/* Multicast promiscuous handling */
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
- goto error_param_int;
+ goto error_param;
}
}
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
allmulti, NULL,
true);
aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret)
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
vf->vf_id, info->flags,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+ goto error_param;
+ }
}
-error_param_int:
if (!aq_ret) {
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
+ if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
- aq_ret = I40E_ERR_TIMEOUT;
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
error_param:
/* send the response to the VF */
}
/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
dev_err(&pf->pdev->dev,
"VF is not trusted, switch the VF to trusted to add more functionality\n");
+ if (pf->vf_base_mode_only)
+ dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d filters\n",
+ vf->vf_id, I40E_VC_MAX_MAC_ADDR_PER_VF);
ret = -EPERM;
}
return ret;
	/* Lock once, because all functions inside the for loop access the
	 * VSI's MAC filter list, which needs to be protected by the same lock.
	 */
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
- f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (!f) {
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
- true, false);
- else
- f = i40e_add_filter(vsi, al->list[i].addr, -1,
- true, false);
- }
+ f = i40e_find_mac(vsi, al->list[i].addr);
+ if (!f)
+ f = i40e_add_mac_filter(vsi, al->list[i].addr);
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
}
vsi = pf->vsi[vf->lan_vsi_idx];
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac--;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
dev_err(&pf->pdev->dev,
"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ if (pf->vf_base_mode_only)
+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d VLANs\n",
+ vf->vf_id, I40E_VC_MAX_VLAN_PER_VF);
goto error_param;
}
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
}
for (i = 0; i < vfl->num_elements; i++) {
- int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
- if (!ret)
- vf->num_vlan--;
+ i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
NULL);
-
- if (ret)
- dev_err(&pf->pdev->dev,
- "Unable to delete VLAN filter %d for VF %d, error %d\n",
- vfl->vlan_id[i], vf->vf_id, ret);
}
error_param:
/* send the response back to the VF */
aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
return aq_ret;
}
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ int bkt;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
}
	/* Lock once, because the add/del_filter functions invoked below require
- * mac_filter_list_lock to be held
+ * mac_filter_hash_lock to be held
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
- i40e_del_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ __i40e_del_filter(vsi, f);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
return ret;
}
+#ifdef IFLA_VF_VLAN_INFO_MAX
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN identifier
+ * @qos: priority setting
+ * @vlan_proto: vlan protocol
+ *
+ * program VF VLAN ID and/or QoS
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
+#else
/**
* i40e_ndo_set_vf_port_vlan
* @netdev: network interface device structure
**/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos)
+#endif /* IFLA_VF_VLAN_INFO_MAX */
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
ret = -EINVAL;
goto error_pvid;
}
-
+#ifdef IFLA_VF_VLAN_INFO_MAX
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+#endif
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
/* duplicate request, so just return success */
goto error_pvid;
- spin_lock_bh(&vsi->mac_filter_list_lock);
- is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+	/* Lock once because multiple functions below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
+ if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
*/
if ((!(vlan_id || qos) ||
vlanprio != le16_to_cpu(vsi->info.pvid)) &&
- vsi->info.pvid)
- ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
-
- if (vsi->info.pvid) {
- /* kill old VLAN */
- ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
+ vsi->info.pvid) {
+ ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "remove VLAN failed, ret=%d, aq_err=%d\n",
- ret, pf->hw.aq.asq_last_status);
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_pvid;
}
}
+
+ if (vsi->info.pvid) {
+ /* remove all filters on the old VLAN */
+ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter */
- ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ /* add new VLAN filter for each MAC */
+ ret = i40e_add_vlan_all_mac(vsi, vlan_id);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"add VF VLAN failed, ret=%d aq_err=%d\n", ret,
vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_pvid;
}
- /* Kill non-vlan MAC filters - ignore error return since
- * there might not be any non-vlan MAC filters.
- */
- i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
+
+ /* remove the previously added non-VLAN MAC filters */
+ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* Schedule the worker thread to take care of applying changes */
+ i40e_service_event_schedule(vsi->back);
+
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+
/* The Port VLAN needs to be saved across resets the same as the
* default LAN MAC address.
*/
case I40E_LINK_SPEED_40GB:
speed = 40000;
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
case I40E_LINK_SPEED_20GB:
speed = 20000;
break;
max_tx_rate = 50;
}
- /* TX rate credits are in values of 50Mbps, 0 is disabled*/
+	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
max_tx_rate / I40E_BW_CREDIT_DIVISOR,
- I40E_MAX_BW_INACTIVE_ACCUM,
- NULL);
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
ret);
vf = &pf->vf[vf_id];
+	/* if the VF is in base mode, make it untrusted */
+ if (pf->vf_base_mode_only)
+ setting = false;
if (setting == vf->trusted)
goto out;
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
- unsigned int tx_rate; /* tx bandwidth limit in Mbps */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
#ifdef HAVE_NDO_SET_VF_LINK_STATE
bool link_forced;
bool link_up; /* only valid if VF link is forced */
/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+#else
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
+#endif
#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#endif /* HAVE_SET_RX_MODE */
#endif /* 3.16.0 */
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#endif /* 3.17.0 */
+
/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
#ifndef NO_PTP_SUPPORT
}
#endif
#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) )
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+int _kc_eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
+{
+ const unsigned char *addr;
+ struct device_node *dp;
+
+ if (dev_is_pci(dev))
+ dp = pci_device_to_OF_node(to_pci_dev(dev));
+ else
+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
+ dp = dev->of_node;
+#else
+ dp = NULL;
+#endif
+
+ addr = NULL;
+ if (dp)
+ addr = of_get_mac_address(dp);
+#ifdef CONFIG_SPARC
+ /* Kernel hasn't implemented arch_get_platform_mac_address, but we
+ * should handle the SPARC case here since it was supported
+ * originally. This is replaced by arch_get_platform_mac_address()
+ * upstream.
+ */
+ if (!addr)
+ addr = idprom->id_ethaddr;
+#endif
+ if (!addr)
+ return -ENODEV;
+
+ ether_addr_copy(mac_addr, addr);
+ return 0;
+}
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#endif
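
(Illustrative aside, not part of the patch: a typical caller tries the
platform-provided address first and falls back to a random one. The pdev
and netdev names below are hypothetical probe-time variables.)

	u8 mac[ETH_ALEN];

	/* Prefer a firmware-described MAC (OF node, SPARC idprom) */
	if (eth_platform_get_mac_address(&pdev->dev, mac))
		eth_random_addr(mac);	/* locally administered fallback */
	ether_addr_copy(netdev->dev_addr, mac);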
/*******************************************************************************
*
* Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
#include <net/ipv6.h>
/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
#ifndef UTS_RELEASE
#ifndef SPEED_5000
#define SPEED_5000 5000
#endif
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
*/
#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
-/* SuSE version macro is the same as Linux kernel version */
+/* SuSE version macros are the same as the Linux kernel version macro */
#ifndef SLE_VERSION
-#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
#endif
+#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c)
#ifdef CONFIG_SUSE_KERNEL
#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
/* SLES11 GA is 2.6.27 based */
/* SLES11 SP1 is 2.6.32 based */
#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
-/* SLES11 SP2 is 3.0.13 based */
+/* SLES11 SP2 GA is 3.0.13-0.27 */
#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
-/* SLES11 SP3 is 3.0.76 based */
+/* SLES11 SP3 GA is 3.0.76-0.11 */
#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
-#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)))
-/* SLES11 SP4 is 3.0.101 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,4,0)
-#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)))
-/* SLES12 GA is 3.12.28 based */
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
+ #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
+ /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
+ #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+ #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
+ /* most SLES11sp3 update kernels */
+ #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+ #else
+ /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
+ #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+ #endif
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
+/* SLES12 GA is 3.12.28-4
+ * kernel updates 3.12.xx-<33 through 52>[.yy] */
#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
+/* SLES12 SP1 GA is 3.12.49-11
+ * updates 3.12.xx-60.yy where xx={51..} */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21))
+/* SLES12 SP2 GA is 4.4.21-69 */
+#define SLE_VERSION_CODE SLE_VERSION(12,2,0)
/* new SLES kernels must be added here with >= based on kernel
* the idea is to order from newest to oldest and just catch all
* of them using the >=
*/
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47)))
-/* SLES12 SP1 is 3.12.47-based */
-#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
#endif /* CONFIG_SUSE_KERNEL */
#ifndef SLE_VERSION_CODE
#define SLE_VERSION_CODE 0
#endif /* SLE_VERSION_CODE */
+#ifndef SLE_LOCALVERSION_CODE
+#define SLE_LOCALVERSION_CODE 0
+#endif /* SLE_LOCALVERSION_CODE */
#ifdef __KLOCWORK__
#ifdef ARRAY_SIZE
#endif
#else /* 2.6.19 */
#include <linux/aer.h>
-#include <linux/string.h>
#include <linux/pci_hotplug.h>
#endif /* < 2.6.19 */
#define ETH_P_PAUSE 0x8808
#endif
+static inline int compound_order(struct page *page)
+{
+ return 0;
+}
#else /* 2.6.22 */
#define ETH_TYPE_TRANS_SETS_DEV
#define HAVE_NETDEV_STATS_IN_NETDEV
#define HAVE_TX_MQ
#endif
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING 0
+#endif
+
#ifdef HAVE_TX_MQ
extern void _kc_netif_tx_stop_all_queues(struct net_device *);
extern void _kc_netif_tx_wake_all_queues(struct net_device *);
#undef HAVE_IXGBE_DEBUG_FS
#undef HAVE_IGB_DEBUG_FS
#else /* < 2.6.27 */
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)(speed & 0xFFFF);
+ ep->speed_hi = (__u16)(speed >> 16);
+}
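
(Illustrative aside, not part of the patch: struct ethtool_cmd predates
32-bit link speeds, so values above 65535 Mb/s must be split across the
16-bit speed and speed_hi fields, which is what the helper above does.)

	struct ethtool_cmd ecmd = { 0 };

	ethtool_cmd_speed_set(&ecmd, SPEED_100000);
	/* 100000 == 0x186A0: ecmd.speed == 0x86A0, ecmd.speed_hi == 0x1 */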
#define HAVE_TX_MQ
#define HAVE_NETDEV_SELECT_QUEUE
#ifdef CONFIG_DEBUG_FS
#define ADVERTISED_10000baseKR_Full (1 << 19)
#endif
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+ return dev->trans_start;
+}
#else /* < 2.6.31 */
#ifndef HAVE_NETDEV_STORAGE_ADDRESS
#define HAVE_NETDEV_STORAGE_ADDRESS
#ifndef __percpu
#define __percpu
#endif /* __percpu */
+
#ifndef PORT_DA
#define PORT_DA PORT_OTHER
-#endif
+#endif /* PORT_DA */
#ifndef PORT_NONE
#define PORT_NONE PORT_OTHER
#endif
#endif
#endif /* RHEL_RELEASE_CODE */
+#ifndef dev_is_pci
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#endif
+
#ifndef ETH_FLAG_NTUPLE
#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
#endif
#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
#define HAVE_IRQ_AFFINITY_HINT
#endif
+struct device_node;
#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
#define HAVE_PM_QOS_REQUEST_LIST
#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/of.h>
#endif /* < 2.6.35 */
/*****************************************************************************/
#ifndef HAVE_NDO_SET_FEATURES
#define HAVE_NDO_SET_FEATURES
#endif
+#define HAVE_IRQ_AFFINITY_NOTIFY
#endif /* < 2.6.39 */
/*****************************************************************************/
#ifndef ETH_P_8021AD
#define ETH_P_8021AD 0x88A8
#endif
+
+/* Stub definition for !CONFIG_OF is introduced later */
+#ifdef CONFIG_OF
+static inline struct device_node *
+pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev)
+{
+#ifdef HAVE_STRUCT_DEVICE_OF_NODE
+ return pdev ? pdev->dev.of_node : NULL;
+#else
+ return NULL;
+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
+}
+#endif /* CONFIG_OF */
#else /* < 3.1.0 */
#ifndef HAVE_DCBNL_IEEE_DELAPP
#define HAVE_DCBNL_IEEE_DELAPP
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
#ifndef ether_addr_equal
static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
#endif
+/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */
+#ifdef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+ return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+ return NULL;
+}
+#endif
#else
+#include <linux/of_net.h>
#define HAVE_FDB_OPS
#define HAVE_ETHTOOL_GET_TS_INFO
#endif /* < 3.5.0 */
#define __GFP_MEMALLOC 0
#endif
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+ memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
#ifndef eth_random_addr
#define eth_random_addr _kc_eth_random_addr
static inline void _kc_eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment */
}
#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
#else /* < 3.6.0 */
#define HAVE_STRUCT_PAGE_PFMEMALLOC
#endif /* < 3.6.0 */
* mmd_eee_adv_to_ethtool_adv_t
* @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
*
- * A small helper function that translates the MMD EEE Advertisment (7.60)
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
* and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
* settings.
*/
#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
#endif /* !RHEL6.8+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))
+#include <linux/hashtable.h>
+#else
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] __read_mostly = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+#endif /* RHEL >= 6.6 */
+
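(Illustrative aside, not part of the patch: the backported macros are
drop-in equivalents of <linux/hashtable.h>, so the mac_filter_hash code
elsewhere in this patch compiles unchanged on pre-3.7 kernels. The
structure and lookup below are hypothetical.)

struct kc_demo_entry {
	int key;
	struct hlist_node hlist;
};

DEFINE_HASHTABLE(kc_demo_tbl, 4);	/* 2^4 = 16 buckets */

static struct kc_demo_entry *kc_demo_find(int key)
{
	struct kc_demo_entry *pos;

	/* Only the bucket chosen by hash_min(key, HASH_BITS()) is scanned */
	hash_for_each_possible(kc_demo_tbl, pos, hlist, key)
		if (pos->key == key)
			return pos;
	return NULL;
}
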
#else /* >= 3.7.0 */
+#include <linux/hashtable.h>
#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
#define USE_CONST_DEV_UC_CHAR
#endif /* >= 3.7.0 */
#define FLOW_MAC_EXT 0x40000000
#endif /* FLOW_MAC_EXT */
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
#else /* >= 3.8.0 */
#ifndef __devinit
#define __devinit
#undef hlist_entry_safe
#define hlist_entry_safe(ptr, type, member) \
- (ptr) ? hlist_entry(ptr, type, member) : NULL
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
#undef hlist_for_each_entry
#define hlist_for_each_entry(pos, head, member) \
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
+#undef hash_for_each
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+#undef hash_for_each_safe
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+#undef hash_for_each_possible
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#undef hash_for_each_possible_safe
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, tmp,\
+ &name[hash_min(key, HASH_BITS(name))], member)
+
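(Illustrative aside, not part of the patch: the statement-expression
rewrite of hlist_entry_safe above exists to avoid double evaluation.
With the old text-substitution form, an argument such as
pos->member.next was expanded once for the NULL test and again for
hlist_entry(); capturing it in ____ptr evaluates it exactly once, which
matters when the argument has side effects.)
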
#ifdef CONFIG_XPS
extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
#ifndef PCI_DEVID
#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have
+ * backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases so that all kernels get either the
+ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions
+ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real
+ * definitions).
+ */
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+ return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+ return NULL;
+}
+#endif
+
#else /* >= 3.10.0 */
#define HAVE_ENCAP_TSO_OFFLOAD
#define USE_DEFAULT_FDB_DEL_DUMP
#define HAVE_SKB_INNER_NETWORK_HEADER
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)))
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#define HAVE_GENEVE_RX_OFFLOAD
+#ifdef ETHTOOL_GLINKSETTINGS
+#define HAVE_ETHTOOL_25G_BITS
+#endif /* ETHTOOL_GLINKSETTINGS */
+#endif
#endif /* >= 3.10.0 */
/*****************************************************************************/
(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
#define HAVE_NDO_SET_VF_LINK_STATE
#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
#else /* >= 3.11.0 */
#define HAVE_NDO_SET_VF_LINK_STATE
#define HAVE_SKB_INNER_PROTOCOL
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
#endif
+#ifndef list_prev_entry
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) )
+#define devm_kcalloc(dev, cnt, size, flags) \
+ devm_kzalloc(dev, cnt * size, flags)
+#endif /* > 2.6.20 */
#else /* >= 3.13.0 */
#define HAVE_VXLAN_CHECKS
#define HAVE_NDO_SELECT_QUEUE_ACCEL
#endif
#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
#endif
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
#ifndef U32_MAX
#define U32_MAX ((u32)~0U)
#endif
#define PKT_HASH_TYPE_L3 2
#define PKT_HASH_TYPE_L4 3
+enum _kc_pkt_hash_types {
+ _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
+ _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
+ _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
+ _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
+};
+#define pkt_hash_types _kc_pkt_hash_types
+
#define skb_set_hash __kc_skb_set_hash
static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
u32 __maybe_unused hash,
#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
#endif /* 3.16.0 */
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */
#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+
#else
#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include <linux/time64.h>
#endif /* 3.17.0 */
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
#ifndef NO_PTP_SUPPORT
#include <linux/errqueue.h>
#define HAVE_SKB_INNER_PROTOCOL_TYPE
#endif /* 3.18.0 */
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
#else
#define HAVE_NDO_FEATURES_CHECK
#endif /* 3.18.4 */
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
/* netdev_phys_port_id renamed to netdev_phys_item_id */
#define netdev_phys_item_id netdev_phys_port_id
#ifndef READ_ONCE
#define READ_ONCE(_x) ACCESS_ONCE(_x)
#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_FDB_ADD_VID
+#endif
#else /* 3.19.0 */
#define HAVE_NDO_FDB_ADD_VID
#define HAVE_RXFH_HASHFUNC
#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
#endif /* 3.19.0 */
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
/* vlan_tx_xx functions got renamed to skb_vlan */
#ifndef skb_vlan_tag_get
#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif
#else
#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
#endif /* 3.20.0 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) )
+/* Definition for CONFIG_OF was introduced earlier */
+#if !defined(CONFIG_OF) && \
+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; }
+#endif /* !CONFIG_OF && RHEL < 7.3 */
+#endif /* < 4.0 */
+
+/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
#ifndef NO_PTP_SUPPORT
#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
{
tc->nsec += delta;
}
+
+static inline struct net_device *
+of_find_net_device_by_node(struct device_node __always_unused *np)
+{
+ return NULL;
+}
+
#define timecounter_adjtime __kc_timecounter_adjtime
#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif
#else
#define HAVE_PTP_CLOCK_INFO_GETTIME64
#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
#define HAVE_NDO_SET_VF_RSS_QUERY_EN
#endif /* 4,1,0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
-#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+ !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \
+ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \
+ !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \
+ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \
+ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
{
#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
return false;
#endif
}
-#endif /* !SLES12sp1 */
+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) && !SLES12sp1+ */
#else
#undef HAVE_STRUCT_PAGE_PFMEMALLOC
#endif /* 4.1.9 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \
+ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
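
(Illustrative aside, not part of the patch: the 64-bit ring_cookie packs
a ring index in the low 32 bits and a VF number in bits 32-39. For
example:)

	__u64 cookie = (3ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5ULL;

	/* ethtool_get_flow_spec_ring(cookie)    returns 5 (ring index)
	 * ethtool_get_flow_spec_ring_vf(cookie) returns 3 (VF number)
	 */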
#else
#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
#endif /* 4.2.0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* (RHEL_RELEASE >= 7.3) */
#ifndef CONFIG_64BIT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#include <asm-generic/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
#endif
#endif /* < 3.3.0 */
#endif /* !CONFIG_64BIT */
-#else
+#else /* < 4.4.0 */
#define HAVE_NDO_SET_VF_TRUST
#ifndef CONFIG_64BIT
#endif /* !CONFIG_64BIT */
#endif /* 4.4.0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
/* protect against a likely backport */
#ifndef NETIF_F_CSUM_MASK
#ifndef NETIF_F_SCTP_CRC
#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
#endif /* NETIF_F_SCTP_CRC */
-#else
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
+extern int _kc_eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#else /* 4.5.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
#define HAVE_GENEVE_RX_OFFLOAD
#endif /* < 4.8.0 */
#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
#endif /* 4.5.0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
-#if !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21))
+#if !(UBUNTU_VERSION_CODE && \
+ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)))
static inline void napi_consume_skb(struct sk_buff *skb,
int __always_unused budget)
{
* sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)))
static inline void page_ref_inc(struct page *page)
{
- atomic_inc(&page->_count);
+ get_page(page);
}
+#else
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#endif
+#else /* 4.6.0 */
+#define HAVE_PAGE_COUNT_BULK_UPDATE
#endif /* 4.6.0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
#else
#define HAVE_NETIF_TRANS_UPDATE
+#ifdef ETHTOOL_GLINKSETTINGS
+#define HAVE_ETHTOOL_25G_BITS
+#endif /* ETHTOOL_GLINKSETTINGS */
#endif /* 4.7.0 */
+/*****************************************************************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0))
enum udp_parsable_tunnel_type {
UDP_TUNNEL_TYPE_VXLAN,
sa_family_t sa_family;
__be16 port;
};
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_io_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+}
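
(Illustrative aside, not part of the patch: these wrappers let a probe
routine claim every memory or I/O BAR in one call instead of open-coding
pci_select_bars(). A hypothetical probe/remove pairing:)

	int err = pci_request_mem_regions(pdev, "i40e");

	if (err)
		return err;
	/* ... ioremap BARs and run ... */
	pci_release_mem_regions(pdev);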
#else
#define HAVE_UDP_ENC_RX_OFFLOAD
#endif /* 4.8.0 */
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#else
+#endif /* 4.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+#ifndef dma_map_page_attrs
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long __always_unused attrs)
+{
+ return dma_map_page(dev, page, offset, size, dir);
+}
+#endif
+
+#ifndef dma_unmap_page_attrs
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+static inline void __kc_dma_unmap_page_attrs(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long __always_unused attrs)
+{
+ dma_unmap_page(dev, addr, size, dir);
+}
+#endif
+
+static inline void __page_frag_cache_drain(struct page *page,
+ unsigned int count)
+{
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+ if (!page_ref_sub_and_test(page, count))
+ return;
+
+ init_page_count(page);
+#else
+ BUG_ON(count > 1);
+ if (!count)
+ return;
+#endif
+ __free_pages(page, compound_order(page));
+}
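
(Illustrative aside, not part of the patch: __page_frag_cache_drain()
releases 'count' references in one step. Where bulk page-ref updates
exist, a single page_ref_sub_and_test() stands in for 'count' put_page()
calls; without them the backport only handles count <= 1, hence the
BUG_ON, which appears sufficient for the page-fragment reuse pattern
this driver relies on.)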
+#else
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#endif /* 4.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
+#else
+#define HAVE_VOID_NDO_GET_STATS64
+#endif /* 4.11.0 */
+
#endif /* _KCOMPAT_H_ */